From a4224e58ccf864cbb3071cef7fe4b76dffe07d1f Mon Sep 17 00:00:00 2001 From: Vishesh Date: Mon, 6 Jan 2025 16:42:37 +0530 Subject: [PATCH 1/5] Improve logging to include more identifiable information (#9873) * Improve logging to include more identifiable information for kvm plugin * Update logging for scaleio plugin * Improve logging to include more identifiable information for default volume storage plugin * Improve logging to include more identifiable information for agent managers * Improve logging to include more identifiable information for Listeners * Replace ids with objects or uuids * Improve logging to include more identifiable information for engine * Improve logging to include more identifiable information for server * Fixups in engine * Improve logging to include more identifiable information for plugins * Improve logging to include more identifiable information for Cmd classes * Fix toString method for StorageFilterTO.java --- .../src/main/java/com/cloud/agent/Agent.java | 44 +- .../cloud/agent/api/to/LoadBalancerTO.java | 24 +- .../java/com/cloud/agent/api/to/NfsTO.java | 8 + .../java/com/cloud/agent/api/to/S3TO.java | 8 + .../cloud/agent/api/to/StorageFilerTO.java | 3 +- .../java/com/cloud/agent/api/to/SwiftTO.java | 8 + .../java/com/cloud/network/Ipv6Service.java | 2 +- .../com/cloud/network/NetworkProfile.java | 8 + .../cloud/network/lb/LoadBalancingRule.java | 4 + .../network/vpn/RemoteAccessVpnService.java | 2 +- .../ha/GlobalLoadBalancingRulesService.java | 3 +- .../main/java/com/cloud/vm/NicProfile.java | 5 +- .../api/command/admin/host/UpdateHostCmd.java | 5 +- .../CreateSnapshotFromVMSnapshotCmd.java | 15 +- .../command/user/vpn/RemoveVpnUserCmd.java | 2 +- .../cluster/ClusterDrsAlgorithm.java | 7 +- .../cloudstack/vm/UnmanagedInstanceTO.java | 26 +- .../agent/api/to/LoadBalancerTOTest.java | 20 +- .../com/cloud/agent/api/ReadyCommand.java | 20 +- .../com/cloud/agent/api/StartupAnswer.java | 14 +- .../cloudstack/storage/to/ImageStoreTO.java | 
13 +- .../storage/to/PrimaryDataStoreTO.java | 13 +- .../storage/to/TemplateObjectTO.java | 5 +- .../cloudstack/storage/to/VolumeObjectTO.java | 5 +- .../subsystem/api/storage/EndPoint.java | 2 + .../api/storage/HypervisorHostListener.java | 10 + .../subsystem/api/storage/TemplateInfo.java | 2 + .../api/storage/TemplateService.java | 3 +- .../main/java/com/cloud/agent/Listener.java | 8 + .../com/cloud/capacity/CapacityManager.java | 2 +- .../configuration/ConfigurationManager.java | 4 +- .../com/cloud/network/IpAddressManager.java | 5 +- .../network/lb/LoadBalancingRulesManager.java | 4 +- .../cloud/network/rules/FirewallManager.java | 6 +- .../com/cloud/network/rules/RulesManager.java | 5 +- .../security/SecurityGroupManager.java | 5 +- .../com/cloud/network/vpc/VpcManager.java | 3 + .../com/cloud/resource/ResourceManager.java | 2 +- .../com/cloud/storage/StorageManager.java | 4 +- .../com/cloud/template/TemplateManager.java | 4 +- .../com/cloud/vm/VmWorkJobHandlerProxy.java | 8 +- .../com/cloud/agent/manager/AgentAttache.java | 20 +- .../cloud/agent/manager/AgentManagerImpl.java | 187 ++++--- .../agent/manager/ClusteredAgentAttache.java | 8 +- .../manager/ClusteredAgentManagerImpl.java | 122 +++-- .../manager/ClusteredDirectAgentAttache.java | 8 +- .../agent/manager/ConnectedAgentAttache.java | 4 +- .../agent/manager/DirectAgentAttache.java | 27 +- .../com/cloud/agent/manager/DummyAttache.java | 4 +- .../agentlb/AgentLoadBalancerPlanner.java | 3 +- .../ClusterBasedAgentLoadBalancerPlanner.java | 21 +- .../cloud/vm/VirtualMachineManagerImpl.java | 111 ++-- .../vm/VirtualMachinePowerStateSync.java | 5 +- .../vm/VirtualMachinePowerStateSyncImpl.java | 80 +-- .../cloud/vm/VmWorkJobWakeupDispatcher.java | 11 +- .../api/DataCenterResourceManagerImpl.java | 2 +- .../entity/api/db/EngineClusterVO.java | 8 + .../entity/api/db/EngineDataCenterVO.java | 8 + .../entity/api/db/EngineHostPodVO.java | 8 + .../entity/api/db/EngineHostVO.java | 5 +- 
.../api/db/dao/EngineClusterDaoImpl.java | 2 +- .../api/db/dao/EngineDataCenterDaoImpl.java | 2 +- .../entity/api/db/dao/EngineHostDaoImpl.java | 2 +- .../api/db/dao/EngineHostPodDaoImpl.java | 2 +- .../orchestration/NetworkOrchestrator.java | 263 +++++----- .../orchestration/StorageOrchestrator.java | 11 +- .../orchestration/VolumeOrchestrator.java | 4 +- .../agent/manager/AgentManagerImplTest.java | 2 +- .../manager/ConnectedAgentAttacheTest.java | 16 +- .../agent/manager/DirectAgentAttacheTest.java | 6 +- .../src/main/java/com/cloud/dc/HostPodVO.java | 8 + .../src/main/java/com/cloud/dc/VlanVO.java | 24 +- .../java/com/cloud/dc/VmwareDatacenterVO.java | 5 +- .../main/java/com/cloud/domain/DomainVO.java | 5 +- .../src/main/java/com/cloud/host/HostVO.java | 2 +- .../cloud/network/LBHealthCheckPolicyVO.java | 8 + .../cloud/network/as/AutoScalePolicyVO.java | 5 +- .../cloud/network/as/AutoScaleVmGroupVO.java | 9 +- .../network/as/AutoScaleVmProfileVO.java | 5 +- .../com/cloud/network/as/ConditionVO.java | 5 +- .../java/com/cloud/network/as/CounterVO.java | 5 +- .../dao/ExternalLoadBalancerDeviceVO.java | 8 + .../com/cloud/network/dao/IPAddressVO.java | 5 +- .../network/dao/LBStickinessPolicyVO.java | 8 + .../com/cloud/network/dao/LoadBalancerVO.java | 8 + .../dao/PhysicalNetworkServiceProviderVO.java | 8 + .../cloud/network/dao/PhysicalNetworkVO.java | 8 + .../cloud/network/dao/RemoteAccessVpnVO.java | 6 + .../dao/Site2SiteCustomerGatewayVO.java | 8 + .../network/dao/Site2SiteVpnConnectionVO.java | 8 + .../network/dao/Site2SiteVpnGatewayVO.java | 8 + .../cloud/network/rules/FirewallRuleVO.java | 5 +- .../network/security/SecurityGroupRuleVO.java | 9 + .../network/security/SecurityGroupVO.java | 9 + .../cloud/network/vpc/NetworkACLItemVO.java | 5 +- .../com/cloud/network/vpc/NetworkACLVO.java | 2 +- .../com/cloud/network/vpc/VpcGatewayVO.java | 7 +- .../com/cloud/network/vpc/VpcOfferingVO.java | 6 +- .../java/com/cloud/network/vpc/VpcVO.java | 6 +- 
.../cloud/offerings/NetworkOfferingVO.java | 5 +- .../cloud/projects/ProjectInvitationVO.java | 7 +- .../java/com/cloud/projects/ProjectVO.java | 4 +- .../main/java/com/cloud/storage/BucketVO.java | 8 +- .../com/cloud/storage/DiskOfferingVO.java | 8 + .../com/cloud/storage/SnapshotPolicyVO.java | 8 + .../com/cloud/storage/SnapshotScheduleVO.java | 17 +- .../java/com/cloud/storage/SnapshotVO.java | 8 +- .../java/com/cloud/storage/VMTemplateVO.java | 4 +- .../main/java/com/cloud/storage/VolumeVO.java | 4 +- .../java/com/cloud/user/UserAccountVO.java | 7 + .../src/main/java/com/cloud/user/UserVO.java | 2 +- .../com/cloud/user/dao/AccountDaoImpl.java | 2 +- .../java/com/cloud/vm/InstanceGroupVO.java | 7 + .../src/main/java/com/cloud/vm/NicVO.java | 5 +- .../com/cloud/vm/dao/NicSecondaryIpVO.java | 9 + .../com/cloud/vm/dao/VMInstanceDaoImpl.java | 18 +- .../com/cloud/vm/snapshot/VMSnapshotVO.java | 8 + .../vm/snapshot/dao/VMSnapshotDaoImpl.java | 2 +- .../org/apache/cloudstack/acl/RoleVO.java | 3 +- .../cloudstack/affinity/AffinityGroupVO.java | 6 +- .../cloudstack/backup/BackupScheduleVO.java | 7 + .../apache/cloudstack/backup/BackupVO.java | 7 + .../cluster/ClusterDrsPlanMigrationVO.java | 8 + .../cloudstack/cluster/ClusterDrsPlanVO.java | 8 + .../download/DirectDownloadCertificateVO.java | 8 + .../cloud/entity/api/db/VMEntityVO.java | 3 +- .../lb/ApplicationLoadBalancerRuleVO.java | 8 + .../apache/cloudstack/network/BgpPeerVO.java | 5 +- .../region/gslb/GlobalLoadBalancerRuleVO.java | 10 + .../cloudstack/secstorage/HeuristicVO.java | 4 +- .../storage/datastore/db/ImageStoreVO.java | 8 + .../storage/datastore/db/ObjectStoreVO.java | 8 + .../storage/datastore/db/StoragePoolVO.java | 3 +- .../storage/sharedfs/SharedFSVO.java | 8 + .../cloudstack/vm/schedule/VMScheduleVO.java | 6 + .../vm/schedule/VMScheduledJobVO.java | 10 + .../cloud/vm/dao/VMInstanceDaoImplTest.java | 20 +- ...vmNonManagedStorageDataMotionStrategy.java | 12 +- 
...NonManagedStorageSystemDataMotionTest.java | 1 - .../image/TemplateDataFactoryImpl.java | 10 +- .../storage/image/TemplateServiceImpl.java | 82 ++- .../storage/image/store/ImageStoreImpl.java | 5 + .../storage/image/store/TemplateObject.java | 9 + .../storage/object/store/ObjectStoreImpl.java | 8 + .../snapshot/DefaultSnapshotStrategy.java | 14 +- .../snapshot/SnapshotDataFactoryImpl.java | 2 +- .../storage/snapshot/SnapshotObject.java | 16 +- .../storage/snapshot/SnapshotServiceImpl.java | 58 +-- .../vmsnapshot/ScaleIOVMSnapshotStrategy.java | 23 +- .../cloudstack/storage/LocalHostEndpoint.java | 5 + .../storage/RemoteHostEndPoint.java | 7 + .../endpoint/DefaultEndPointSelector.java | 2 +- .../storage/helper/VMSnapshotHelperImpl.java | 15 +- .../image/BaseImageStoreDriverImpl.java | 23 +- .../storage/vmsnapshot/VMSnapshotHelper.java | 4 +- .../datastore/PrimaryDataStoreHelper.java | 9 +- .../datastore/PrimaryDataStoreImpl.java | 3 +- .../BasePrimaryDataStoreLifeCycleImpl.java | 5 +- .../provider/DefaultHostListener.java | 40 +- .../storage/volume/VolumeObject.java | 9 +- .../storage/volume/VolumeServiceImpl.java | 183 ++++--- ...BasePrimaryDataStoreLifeCycleImplTest.java | 2 +- .../storage/volume/VolumeServiceTest.java | 6 + .../com/cloud/cluster/ClusterManagerImpl.java | 49 +- .../cloud/cluster/ManagementServerHostVO.java | 5 +- .../com/cloud/utils/db/GenericDaoBase.java | 4 + .../cloudstack/framework/events/Event.java | 8 + .../framework/jobs/impl/AsyncJobVO.java | 27 +- .../affinity/ExplicitDedicationProcessor.java | 6 +- .../affinity/HostAffinityProcessor.java | 2 +- .../affinity/HostAntiAffinityProcessor.java | 10 +- .../NonStrictHostAffinityProcessor.java | 10 +- .../cloudstack/sioc/SiocManagerImpl.java | 11 +- .../backup/DummyBackupProvider.java | 10 +- .../cloudstack/backup/NASBackupProvider.java | 11 +- .../backup/NetworkerBackupProvider.java | 35 +- .../backup/networker/NetworkerClient.java | 6 +- .../backup/VeeamBackupProvider.java | 21 +- 
.../backup/VeeamBackupProviderTest.java | 2 +- .../DedicatedResourceManagerImpl.java | 50 +- .../deploy/ImplicitDedicationPlanner.java | 13 +- .../apache/cloudstack/cluster/Balanced.java | 22 +- .../cloudstack/cluster/BalancedTest.java | 18 +- .../apache/cloudstack/cluster/Condensed.java | 22 +- .../cloudstack/cluster/CondensedTest.java | 19 +- .../mom/webhook/WebhookServiceImpl.java | 5 +- .../mom/webhook/vo/WebhookDeliveryJoinVO.java | 4 +- .../mom/webhook/vo/WebhookDeliveryVO.java | 2 +- .../mom/webhook/vo/WebhookJoinVO.java | 2 +- .../cloudstack/mom/webhook/vo/WebhookVO.java | 2 +- .../allocator/impl/RandomAllocator.java | 8 +- .../manager/BareMetalDiscoverer.java | 2 +- .../baremetal/manager/BareMetalPlanner.java | 28 +- .../manager/BaremetalManagerImpl.java | 10 +- .../java/com/cloud/ha/HypervInvestigator.java | 2 +- .../discoverer/HypervServerDiscoverer.java | 12 +- .../hypervisor/hyperv/guru/HypervGuru.java | 2 +- .../java/com/cloud/ha/KVMInvestigator.java | 15 +- .../kvm/storage/KVMStorageProcessor.java | 68 +-- .../kvm/storage/LibvirtStoragePool.java | 5 +- .../kvm/storage/MultipathSCSIAdapterBase.java | 5 +- .../cloudstack/kvm/ha/KVMHAProvider.java | 12 +- .../kvm/ha/KVMHostActivityChecker.java | 23 +- .../com/cloud/ha/SimulatorInvestigator.java | 2 +- .../hypervisor/guru/VmwareVmImplementer.java | 2 +- .../vmware/VmwareServerDiscoverer.java | 2 +- .../resource/VmwareStorageProcessor.java | 12 +- .../XenServerStorageMotionStrategy.java | 22 +- .../cloudian/CloudianConnectorImpl.java | 6 +- .../cluster/KubernetesClusterManagerImpl.java | 84 +-- .../cluster/KubernetesClusterVO.java | 8 + .../cluster/KubernetesServiceHelperImpl.java | 2 +- .../KubernetesClusterActionWorker.java | 8 +- .../KubernetesClusterDestroyWorker.java | 34 +- ...esClusterResourceModifierActionWorker.java | 35 +- .../KubernetesClusterScaleWorker.java | 35 +- .../KubernetesClusterStartWorker.java | 24 +- .../KubernetesClusterStopWorker.java | 5 +- .../KubernetesClusterUpgradeWorker.java 
| 12 +- .../cluster/utils/KubernetesClusterUtil.java | 50 +- .../version/KubernetesSupportedVersionVO.java | 8 + .../version/KubernetesVersionManagerImpl.java | 6 +- .../cluster/DeleteKubernetesClusterCmd.java | 3 +- .../cluster/ScaleKubernetesClusterCmd.java | 3 +- .../cluster/StopKubernetesClusterCmd.java | 3 +- .../cluster/UpgradeKubernetesClusterCmd.java | 3 +- .../metrics/MetricsServiceImpl.java | 2 +- .../cloud/network/BigSwitchBcfDeviceVO.java | 8 + .../network/element/BigSwitchBcfElement.java | 18 +- .../guru/BigSwitchBcfGuestNetworkGuru.java | 10 +- .../network/element/BrocadeVcsElement.java | 14 +- .../guru/BrocadeVcsGuestNetworkGuru.java | 10 +- .../network/cisco/CiscoVnmcControllerVO.java | 9 + .../network/element/CiscoVnmcElement.java | 84 ++- .../element/ElasticLoadBalancerElement.java | 2 +- .../lb/ElasticLoadBalancerManagerImpl.java | 6 +- .../network/lb/LoadBalanceRuleHandler.java | 12 +- .../element/InternalLoadBalancerElement.java | 3 +- .../lb/InternalLoadBalancerVMManagerImpl.java | 14 +- .../management/ContrailElementImpl.java | 18 +- .../contrail/management/ContrailGuru.java | 18 +- .../management/ManagementNetworkGuru.java | 2 +- .../contrail/model/VirtualMachineModel.java | 4 +- .../contrail/model/VirtualNetworkModel.java | 5 +- .../network/element/NetscalerElement.java | 6 +- .../network/vm/NetScalerVMManagerImpl.java | 4 +- .../network/element/NiciraNvpElement.java | 32 +- .../guru/NiciraNvpGuestNetworkGuru.java | 8 +- .../apache/cloudstack/service/NsxElement.java | 17 +- .../service/NsxGuestNetworkGuru.java | 12 +- .../service/NsxPublicNetworkGuru.java | 6 +- .../cloudstack/service/NsxServiceImpl.java | 6 +- .../OpendaylightGuestNetworkGuru.java | 8 +- ...DaylightControllerResourceManagerImpl.java | 8 +- .../com/cloud/network/element/OvsElement.java | 58 +-- .../network/guru/OvsGuestNetworkGuru.java | 5 +- .../network/ovs/OvsTunnelManagerImpl.java | 63 +-- .../ConfigTungstenFabricServiceCmd.java | 2 +- 
.../tungsten/service/TungstenElement.java | 11 +- .../service/TungstenGuestNetworkGuru.java | 4 +- .../network/guru/VxlanGuestNetworkGuru.java | 2 +- .../driver/CephObjectStoreDriverImpl.java | 2 +- .../driver/AdaptiveDataStoreDriverImpl.java | 47 +- .../AdaptiveDataStoreLifeCycleImpl.java | 37 +- .../provider/AdaptivePrimaryHostListener.java | 41 +- .../ElastistorPrimaryDataStoreDriver.java | 2 +- .../ElastistorPrimaryDataStoreLifeCycle.java | 18 +- .../provider/ElastistorHostListener.java | 10 +- .../driver/DateraPrimaryDataStoreDriver.java | 31 +- .../DateraPrimaryDataStoreLifeCycle.java | 4 +- .../provider/DateraHostListener.java | 18 +- .../CloudStackPrimaryDataStoreDriverImpl.java | 12 +- ...oudStackPrimaryDataStoreLifeCycleImpl.java | 46 +- ...tackPrimaryDataStoreLifeCycleImplTest.java | 4 +- .../LinstorPrimaryDataStoreDriverImpl.java | 4 +- .../LinstorPrimaryDataStoreLifeCycleImpl.java | 16 +- .../provider/LinstorHostListener.java | 4 +- .../snapshot/LinstorVMSnapshotStrategy.java | 18 +- .../NexentaPrimaryDataStoreLifeCycle.java | 2 +- .../ScaleIOGatewayClientConnectionPool.java | 34 +- .../driver/ScaleIOPrimaryDataStoreDriver.java | 144 +++--- .../ScaleIOPrimaryDataStoreLifeCycle.java | 20 +- .../manager/ScaleIOSDCManagerImpl.java | 67 ++- .../provider/ScaleIOHostListener.java | 32 +- .../ScaleIOPrimaryDataStoreDriverTest.java | 17 +- .../ScaleIOPrimaryDataStoreLifeCycleTest.java | 5 +- .../SolidFirePrimaryDataStoreDriver.java | 18 +- .../SolidFirePrimaryDataStoreLifeCycle.java | 4 +- ...idFireSharedPrimaryDataStoreLifeCycle.java | 30 +- .../provider/SolidFireHostListener.java | 38 +- .../provider/SolidFireSharedHostListener.java | 22 +- .../storage/datastore/util/SolidFireUtil.java | 4 +- .../StorPoolBackupSnapshotCommandWrapper.java | 3 +- .../StorPoolPrimaryDataStoreLifeCycle.java | 2 +- .../provider/StorPoolHostListener.java | 20 +- .../motion/StorPoolDataMotionStrategy.java | 18 +- .../snapshot/StorPoolSnapshotStrategy.java | 12 +- 
.../snapshot/StorPoolVMSnapshotStrategy.java | 15 +- .../api/command/LdapImportUsersCmd.java | 2 +- .../api/command/LinkAccountToLdapCmd.java | 6 +- .../api/command/LinkDomainToLdapCmd.java | 6 +- .../api/command/LinkAccountToLdapCmdTest.java | 4 + .../api/command/LinkDomainToLdapCmdTest.java | 4 + .../cloud/acl/AffinityGroupAccessChecker.java | 4 +- .../java/com/cloud/acl/DomainChecker.java | 24 +- .../allocator/impl/FirstFitAllocator.java | 15 +- .../allocator/impl/RecreateHostAllocator.java | 2 +- .../impl/UserConcentratedAllocator.java | 25 +- .../com/cloud/alert/AlertManagerImpl.java | 21 +- .../com/cloud/alert/ClusterAlertAdapter.java | 10 +- .../main/java/com/cloud/api/ApiServer.java | 10 +- .../com/cloud/api/query/QueryManagerImpl.java | 8 +- .../api/query/dao/SnapshotJoinDaoImpl.java | 2 +- .../api/query/vo/NetworkOfferingJoinVO.java | 7 + .../java/com/cloud/bgp/BGPServiceImpl.java | 36 +- .../cloud/capacity/CapacityManagerImpl.java | 184 +++---- .../ConfigurationManagerImpl.java | 227 ++++---- .../AgentBasedConsoleProxyManager.java | 16 +- ...entBasedStandaloneConsoleProxyManager.java | 22 +- .../consoleproxy/ConsoleProxyManagerImpl.java | 77 ++- .../consoleproxy/ConsoleProxyService.java | 3 +- .../StaticConsoleProxyManager.java | 2 +- .../com/cloud/dc/DedicatedResourceVO.java | 8 + .../deploy/DeploymentPlanningManagerImpl.java | 233 ++++----- .../com/cloud/deploy/FirstFitPlanner.java | 8 +- .../cloud/ha/AbstractInvestigatorImpl.java | 17 +- .../cloud/ha/HighAvailabilityManagerImpl.java | 62 ++- .../ha/ManagementIPSystemVMInvestigator.java | 8 +- .../com/cloud/ha/UserVmDomRInvestigator.java | 27 +- .../CloudZonesStartupProcessor.java | 4 +- .../cloud/hypervisor/HypervisorGuruBase.java | 2 +- .../java/com/cloud/hypervisor/KVMGuru.java | 15 +- .../hypervisor/kvm/dpdk/DpdkHelperImpl.java | 7 +- .../ExternalDeviceUsageManagerImpl.java | 10 +- .../ExternalFirewallDeviceManagerImpl.java | 25 +- ...ExternalLoadBalancerDeviceManagerImpl.java | 52 +- 
.../cloud/network/IpAddressManagerImpl.java | 110 ++-- .../cloud/network/Ipv6AddressManagerImpl.java | 15 +- .../com/cloud/network/Ipv6ServiceImpl.java | 38 +- .../network/NetworkMigrationManagerImpl.java | 16 +- .../com/cloud/network/NetworkModelImpl.java | 62 ++- .../com/cloud/network/NetworkServiceImpl.java | 129 +++-- .../network/NetworkUsageManagerImpl.java | 36 +- .../cloud/network/SshKeysDistriMonitor.java | 11 +- .../cloud/network/as/AutoScaleManager.java | 5 +- .../network/as/AutoScaleManagerImpl.java | 220 ++++---- .../element/ConfigDriveNetworkElement.java | 31 +- .../network/element/VirtualRouterElement.java | 25 +- .../element/VpcVirtualRouterElement.java | 39 +- .../network/firewall/FirewallManagerImpl.java | 48 +- .../cloud/network/guru/DirectNetworkGuru.java | 6 +- .../guru/DirectPodBasedNetworkGuru.java | 6 +- .../cloud/network/guru/GuestNetworkGuru.java | 13 +- .../lb/LoadBalancingRulesManagerImpl.java | 179 ++++--- .../network/router/CommandSetupHelper.java | 2 +- .../network/router/NetworkHelperImpl.java | 51 +- .../VirtualNetworkApplianceManagerImpl.java | 114 ++-- ...VpcVirtualNetworkApplianceManagerImpl.java | 18 +- .../cloud/network/rules/DhcpSubNetRules.java | 2 +- .../network/rules/PrivateGatewayRules.java | 2 +- .../cloud/network/rules/RulesManagerImpl.java | 101 ++-- .../network/rules/VpcIpAssociationRules.java | 4 +- .../security/SecurityGroupListener.java | 15 +- .../security/SecurityGroupManagerImpl.java | 80 +-- .../security/SecurityGroupManagerImpl2.java | 11 +- .../network/vpc/NetworkACLManagerImpl.java | 24 +- .../network/vpc/NetworkACLServiceImpl.java | 18 +- .../com/cloud/network/vpc/VpcManagerImpl.java | 100 ++-- .../VpcPrivateGatewayTransactionCallable.java | 5 +- .../vpn/RemoteAccessVpnManagerImpl.java | 21 +- .../network/vpn/Site2SiteVpnManagerImpl.java | 26 +- .../com/cloud/projects/ProjectManager.java | 2 +- .../cloud/projects/ProjectManagerImpl.java | 73 ++- .../cloud/resource/ResourceManagerImpl.java | 148 +++--- 
.../RollingMaintenanceManagerImpl.java | 2 +- .../resourceicon/ResourceIconManagerImpl.java | 4 +- .../ResourceLimitManagerImpl.java | 7 +- .../cloud/server/ConfigurationServerImpl.java | 4 +- .../cloud/server/ManagementServerImpl.java | 30 +- .../java/com/cloud/server/StatsCollector.java | 29 +- .../cloud/servlet/ConsoleProxyServlet.java | 26 +- .../cloud/storage/ImageStoreServiceImpl.java | 6 +- .../storage/ImageStoreUploadMonitorImpl.java | 36 +- .../com/cloud/storage/OCFS2ManagerImpl.java | 20 +- .../com/cloud/storage/StorageManagerImpl.java | 228 ++++---- .../storage/StoragePoolAutomationImpl.java | 7 +- .../cloud/storage/VolumeApiServiceImpl.java | 138 +++-- .../storage/download/DownloadListener.java | 6 +- .../storage/download/DownloadMonitorImpl.java | 6 +- .../storage/listener/StoragePoolMonitor.java | 21 +- .../storage/snapshot/SnapshotManager.java | 3 +- .../storage/snapshot/SnapshotManagerImpl.java | 114 ++-- .../snapshot/SnapshotSchedulerImpl.java | 42 +- .../cloud/storage/upload/UploadListener.java | 4 +- .../storage/upload/UploadMonitorImpl.java | 10 +- .../cloud/tags/TaggedResourceManagerImpl.java | 15 +- .../template/HypervisorTemplateAdapter.java | 38 +- .../cloud/template/TemplateAdapterBase.java | 2 +- .../cloud/template/TemplateManagerImpl.java | 93 ++-- .../com/cloud/usage/UsageServiceImpl.java | 2 +- .../com/cloud/user/AccountManagerImpl.java | 229 ++++---- .../com/cloud/user/DomainManagerImpl.java | 47 +- .../java/com/cloud/vm/UserVmManagerImpl.java | 489 +++++++++--------- .../vm/snapshot/VMSnapshotManagerImpl.java | 46 +- .../acl/ProjectRoleManagerImpl.java | 2 +- .../cloudstack/acl/RoleManagerImpl.java | 4 +- .../affinity/AffinityGroupServiceImpl.java | 8 +- .../agent/lb/IndirectAgentLBServiceImpl.java | 6 +- .../cloudstack/backup/BackupManagerImpl.java | 40 +- .../apache/cloudstack/ca/CAManagerImpl.java | 8 +- .../cluster/ClusterDrsServiceImpl.java | 32 +- .../ConsoleAccessManagerImpl.java | 29 +- 
.../diagnostics/DiagnosticsServiceImpl.java | 18 +- .../diagnostics/to/DiagnosticsDataObject.java | 8 + .../download/DirectDownloadManagerImpl.java | 91 ++-- .../apache/cloudstack/ha/HAManagerImpl.java | 17 +- .../provider/host/HAAbstractHostProvider.java | 10 +- .../apache/cloudstack/ha/task/BaseHATask.java | 2 +- .../network/RoutedIpv4ManagerImpl.java | 4 +- .../ApplicationLoadBalancerManagerImpl.java | 5 +- .../RouterDeploymentDefinition.java | 12 +- .../VpcRouterDeploymentDefinition.java | 8 +- .../topology/AdvancedNetworkTopology.java | 10 +- .../topology/BasicNetworkTopology.java | 22 +- .../OutOfBandManagementServiceImpl.java | 14 +- .../PowerOperationTask.java | 5 +- .../GlobalLoadBalancingRulesServiceImpl.java | 19 +- .../cloudstack/snapshot/SnapshotHelper.java | 27 +- .../heuristics/HeuristicRuleHelper.java | 8 +- .../GenericHeuristicPresetVariable.java | 4 +- .../storage/object/BucketApiServiceImpl.java | 2 +- .../storage/sharedfs/SharedFSServiceImpl.java | 6 +- .../template/VnfTemplateManagerImpl.java | 16 +- .../VolumeImportUnmanageManagerImpl.java | 34 +- .../user/UserPasswordResetManagerImpl.java | 35 +- .../vm/UnmanagedVMsManagerImpl.java | 111 ++-- .../vm/schedule/VMScheduleManagerImpl.java | 6 +- .../vm/schedule/VMSchedulerImpl.java | 30 +- .../com/cloud/alert/AlertManagerImplTest.java | 25 + .../cloud/capacity/CapacityManagerTest.java | 4 +- .../ConfigurationManagerTest.java | 2 +- .../DeploymentPlanningManagerImplTest.java | 4 +- .../ha/HighAvailabilityManagerImplTest.java | 2 - .../cloud/network/Ipv6ServiceImplTest.java | 37 +- .../network/MockFirewallManagerImpl.java | 4 +- .../cloud/network/NetworkServiceImplTest.java | 4 +- .../network/as/AutoScaleManagerImplTest.java | 52 +- .../element/VpcVirtualRouterElementTest.java | 6 +- .../projects/MockProjectManagerImpl.java | 2 +- .../resource/MockResourceManagerImpl.java | 2 +- .../storage/VolumeApiServiceImplTest.java | 3 - .../listener/StoragePoolMonitorTest.java | 6 +- 
.../HypervisorTemplateAdapterTest.java | 13 +- .../cloud/user/AccountManagerImplTest.java | 7 +- .../com/cloud/user/DomainManagerImplTest.java | 6 +- .../com/cloud/vm/UserVmManagerImplTest.java | 3 +- .../java/com/cloud/vm/UserVmManagerTest.java | 3 +- .../vm/snapshot/VMSnapshotManagerTest.java | 8 +- .../vpc/MockConfigurationManagerImpl.java | 4 +- .../cluster/ClusterDrsServiceImplTest.java | 8 +- .../RouterDeploymentDefinitionTest.java | 6 + .../heuristics/HeuristicRuleHelperTest.java | 3 - .../VolumeImportUnmanageManagerImplTest.java | 18 +- .../vm/UnmanagedVMsManagerImplTest.java | 1 - .../vm/schedule/VMSchedulerImplTest.java | 2 +- .../cloud/usage/parser/BucketUsageParser.java | 2 +- .../usage/parser/NetworksUsageParser.java | 4 +- .../cloud/usage/parser/VpcUsageParser.java | 6 +- 468 files changed, 5472 insertions(+), 4706 deletions(-) diff --git a/agent/src/main/java/com/cloud/agent/Agent.java b/agent/src/main/java/com/cloud/agent/Agent.java index 15f010808ac..c84179d6660 100644 --- a/agent/src/main/java/com/cloud/agent/Agent.java +++ b/agent/src/main/java/com/cloud/agent/Agent.java @@ -132,6 +132,8 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater ServerResource _resource; Link _link; Long _id; + String _uuid; + String _name; Timer _timer = new Timer("Agent Timer"); Timer certTimer; @@ -182,8 +184,10 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater resource.setAgentControl(this); final String value = _shell.getPersistentProperty(getResourceName(), "id"); + _uuid = _shell.getPersistentProperty(getResourceName(), "uuid"); + _name = _shell.getPersistentProperty(getResourceName(), "name"); _id = value != null ? 
Long.parseLong(value) : null; - logger.info("id is {}", ObjectUtils.defaultIfNull(_id, "")); + logger.info("Initialising agent [id: {}, uuid: {}, name: {}]", ObjectUtils.defaultIfNull(_id, ""), _uuid, _name); final Map params = new HashMap<>(); @@ -212,8 +216,9 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater new ThreadPoolExecutor(_shell.getWorkers(), 5 * _shell.getWorkers(), 1, TimeUnit.DAYS, new LinkedBlockingQueue(), new NamedThreadFactory( "agentRequest-Handler")); - logger.info("Agent [id = {} : type = {} : zone = {} : pod = {} : workers = {} : host = {} : port = {}", ObjectUtils.defaultIfNull(_id, "new"), getResourceName(), - _shell.getZone(), _shell.getPod(), _shell.getWorkers(), host, _shell.getPort()); + logger.info("Agent [id = {}, uuid: {}, name: {}] : type = {} : zone = {} : pod = {} : workers = {} : host = {} : port = {}", + ObjectUtils.defaultIfNull(_id, "new"), _uuid, _name, getResourceName(), + _shell.getZone(), _shell.getPod(), _shell.getWorkers(), host, _shell.getPort()); } public String getVersion() { @@ -377,11 +382,28 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } public void setId(final Long id) { - logger.debug("Set agent id {}", id); _id = id; _shell.setPersistentProperty(getResourceName(), "id", Long.toString(id)); } + public String getUuid() { + return _uuid; + } + + public void setUuid(String uuid) { + this._uuid = uuid; + _shell.setPersistentProperty(getResourceName(), "uuid", uuid); + } + + public String getName() { + return _name; + } + + public void setName(String name) { + this._name = name; + _shell.setPersistentProperty(getResourceName(), "name", name); + } + private synchronized void scheduleServicesRestartTask() { if (certTimer != null) { certTimer.cancel(); @@ -594,9 +616,12 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater return; } - logger.info("Process agent startup answer, agent id = {}", startup.getHostId()); + 
logger.info("Process agent startup answer, agent [id: {}, uuid: {}, name: {}] connected to the server", + startup.getHostId(), startup.getHostUuid(), startup.getHostName()); setId(startup.getHostId()); + setUuid(startup.getHostUuid()); + setName(startup.getHostName()); _pingInterval = (long)startup.getPingInterval() * 1000; // change to ms. setLastPingResponseTime(); @@ -604,7 +629,8 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater _ugentTaskPool.setKeepAliveTime(2 * _pingInterval, TimeUnit.MILLISECONDS); - logger.info("Startup Response Received: agent id = {}", getId()); + logger.info("Startup Response Received: agent [id: {}, uuid: {}, name: {}]", + startup.getHostId(), startup.getHostUuid(), startup.getHostName()); } protected void processRequest(final Request request, final Link link) { @@ -860,15 +886,17 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater NumbersUtil.enableHumanReadableSizes = humanReadable; } - logger.info("Processing agent ready command, agent id = {}", ready.getHostId()); + logger.info("Processing agent ready command, agent id = {}, uuid = {}, name = {}", ready.getHostId(), ready.getHostUuid(), ready.getHostName()); if (ready.getHostId() != null) { setId(ready.getHostId()); + setUuid(ready.getHostUuid()); + setName(ready.getHostName()); } verifyAgentArch(ready.getArch()); processManagementServerList(ready.getMsHostList(), ready.getLbAlgorithm(), ready.getLbCheckInterval()); - logger.info("Ready command is processed for agent id = {}", getId()); + logger.info("Ready command is processed for agent [id: {}, uuid: {}, name: {}]", getId(), getUuid(), getName()); } private void verifyAgentArch(String arch) { diff --git a/api/src/main/java/com/cloud/agent/api/to/LoadBalancerTO.java b/api/src/main/java/com/cloud/agent/api/to/LoadBalancerTO.java index 966d24886fe..f395f26aeed 100644 --- a/api/src/main/java/com/cloud/agent/api/to/LoadBalancerTO.java +++ 
b/api/src/main/java/com/cloud/agent/api/to/LoadBalancerTO.java @@ -374,13 +374,15 @@ public class LoadBalancerTO { public static class CounterTO implements Serializable { private static final long serialVersionUID = 2L; private final Long id; + private final String uuid; private final String name; private final Counter.Source source; private final String value; private final String provider; - public CounterTO(Long id, String name, Counter.Source source, String value, String provider) { + public CounterTO(Long id, String uuid, String name, Counter.Source source, String value, String provider) { this.id = id; + this.uuid = uuid; this.name = name; this.source = source; this.value = value; @@ -391,6 +393,10 @@ public class LoadBalancerTO { return id; } + public String getUuid() { + return uuid; + } + public String getName() { return name; } @@ -411,12 +417,14 @@ public class LoadBalancerTO { public static class ConditionTO implements Serializable { private static final long serialVersionUID = 2L; private final Long id; + private final String uuid; private final long threshold; private final Condition.Operator relationalOperator; private final CounterTO counter; - public ConditionTO(Long id, long threshold, Condition.Operator relationalOperator, CounterTO counter) { + public ConditionTO(Long id, String uuid, long threshold, Condition.Operator relationalOperator, CounterTO counter) { this.id = id; + this.uuid = uuid; this.threshold = threshold; this.relationalOperator = relationalOperator; this.counter = counter; @@ -426,6 +434,10 @@ public class LoadBalancerTO { return id; } + public String getUuid() { + return uuid; + } + public long getThreshold() { return threshold; } @@ -442,6 +454,7 @@ public class LoadBalancerTO { public static class AutoScalePolicyTO implements Serializable { private static final long serialVersionUID = 2L; private final long id; + private final String uuid; private final int duration; private final int quietTime; private final Date 
lastQuietTime; @@ -449,8 +462,9 @@ public class LoadBalancerTO { boolean revoked; private final List conditions; - public AutoScalePolicyTO(long id, int duration, int quietTime, Date lastQuietTime, AutoScalePolicy.Action action, List conditions, boolean revoked) { + public AutoScalePolicyTO(long id, String uuid, int duration, int quietTime, Date lastQuietTime, AutoScalePolicy.Action action, List conditions, boolean revoked) { this.id = id; + this.uuid = uuid; this.duration = duration; this.quietTime = quietTime; this.lastQuietTime = lastQuietTime; @@ -463,6 +477,10 @@ public class LoadBalancerTO { return id; } + public String getUuid() { + return uuid; + } + public int getDuration() { return duration; } diff --git a/api/src/main/java/com/cloud/agent/api/to/NfsTO.java b/api/src/main/java/com/cloud/agent/api/to/NfsTO.java index 0f6511e8311..eeddbf649a7 100644 --- a/api/src/main/java/com/cloud/agent/api/to/NfsTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/NfsTO.java @@ -17,6 +17,7 @@ package com.cloud.agent.api.to; import com.cloud.storage.DataStoreRole; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class NfsTO implements DataStoreTO { @@ -41,6 +42,13 @@ public class NfsTO implements DataStoreTO { } + @Override + public String toString() { + return String.format("NfsTO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "uuid", "_url", "_role", "nfsVersion")); + } + @Override public String getUrl() { return _url; diff --git a/api/src/main/java/com/cloud/agent/api/to/S3TO.java b/api/src/main/java/com/cloud/agent/api/to/S3TO.java index 233238cf793..936f8168b1e 100644 --- a/api/src/main/java/com/cloud/agent/api/to/S3TO.java +++ b/api/src/main/java/com/cloud/agent/api/to/S3TO.java @@ -22,6 +22,7 @@ import com.cloud.agent.api.LogLevel; import com.cloud.agent.api.LogLevel.Log4jLevel; import com.cloud.storage.DataStoreRole; import com.cloud.utils.storage.S3.ClientOptions; +import 
org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public final class S3TO implements ClientOptions, DataStoreTO { @@ -68,6 +69,13 @@ public final class S3TO implements ClientOptions, DataStoreTO { } + @Override + public String toString() { + return String.format("S3TO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "bucketName")); + } + public Long getId() { return this.id; } diff --git a/api/src/main/java/com/cloud/agent/api/to/StorageFilerTO.java b/api/src/main/java/com/cloud/agent/api/to/StorageFilerTO.java index e361e7a141f..cbdb7922eb4 100644 --- a/api/src/main/java/com/cloud/agent/api/to/StorageFilerTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/StorageFilerTO.java @@ -19,6 +19,7 @@ package com.cloud.agent.api.to; import com.cloud.agent.api.LogLevel; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StoragePool; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class StorageFilerTO { long id; @@ -73,6 +74,6 @@ public class StorageFilerTO { @Override public String toString() { - return new StringBuilder("Pool[").append(id).append("|").append(host).append(":").append(port).append("|").append(path).append("]").toString(); + return String.format("Pool %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "host", "port", "path")); } } diff --git a/api/src/main/java/com/cloud/agent/api/to/SwiftTO.java b/api/src/main/java/com/cloud/agent/api/to/SwiftTO.java index b89dfea40e0..14038566fbd 100644 --- a/api/src/main/java/com/cloud/agent/api/to/SwiftTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/SwiftTO.java @@ -18,6 +18,7 @@ package com.cloud.agent.api.to; import com.cloud.storage.DataStoreRole; import com.cloud.utils.SwiftUtil; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class SwiftTO implements 
DataStoreTO, SwiftUtil.SwiftClientCfg { Long id; @@ -41,6 +42,13 @@ public class SwiftTO implements DataStoreTO, SwiftUtil.SwiftClientCfg { this.storagePolicy = storagePolicy; } + @Override + public String toString() { + return String.format("SwiftTO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "account", "userName")); + } + public Long getId() { return id; } diff --git a/api/src/main/java/com/cloud/network/Ipv6Service.java b/api/src/main/java/com/cloud/network/Ipv6Service.java index 2b4dff01086..4ef5f98c38d 100644 --- a/api/src/main/java/com/cloud/network/Ipv6Service.java +++ b/api/src/main/java/com/cloud/network/Ipv6Service.java @@ -58,7 +58,7 @@ public interface Ipv6Service extends PluggableService, Configurable { Pair getUsedTotalIpv6SubnetForZone(long zoneId); - Pair preAllocateIpv6SubnetForNetwork(long zoneId) throws ResourceAllocationException; + Pair preAllocateIpv6SubnetForNetwork(DataCenter zone) throws ResourceAllocationException; void assignIpv6SubnetToNetwork(String subnet, long networkId); diff --git a/api/src/main/java/com/cloud/network/NetworkProfile.java b/api/src/main/java/com/cloud/network/NetworkProfile.java index 83dc247cc9e..2e8efb48930 100644 --- a/api/src/main/java/com/cloud/network/NetworkProfile.java +++ b/api/src/main/java/com/cloud/network/NetworkProfile.java @@ -22,6 +22,7 @@ import java.util.Date; import com.cloud.network.Networks.BroadcastDomainType; import com.cloud.network.Networks.Mode; import com.cloud.network.Networks.TrafficType; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class NetworkProfile implements Network { private final long id; @@ -384,4 +385,11 @@ public class NetworkProfile implements Network { return networkCidrSize; } + @Override + public String toString() { + return String.format("NetworkProfile %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "networkOfferingId")); + } + } 
diff --git a/api/src/main/java/com/cloud/network/lb/LoadBalancingRule.java b/api/src/main/java/com/cloud/network/lb/LoadBalancingRule.java index 64b2aeedf12..e4cf4ec526f 100644 --- a/api/src/main/java/com/cloud/network/lb/LoadBalancingRule.java +++ b/api/src/main/java/com/cloud/network/lb/LoadBalancingRule.java @@ -63,6 +63,10 @@ public class LoadBalancingRule { return lb.getId(); } + public LoadBalancer getLb() { + return lb; + } + public String getName() { return lb.getName(); } diff --git a/api/src/main/java/com/cloud/network/vpn/RemoteAccessVpnService.java b/api/src/main/java/com/cloud/network/vpn/RemoteAccessVpnService.java index bbb9771d27a..ffa8af4576d 100644 --- a/api/src/main/java/com/cloud/network/vpn/RemoteAccessVpnService.java +++ b/api/src/main/java/com/cloud/network/vpn/RemoteAccessVpnService.java @@ -39,7 +39,7 @@ public interface RemoteAccessVpnService { VpnUser addVpnUser(long vpnOwnerId, String userName, String password); - boolean removeVpnUser(long vpnOwnerId, String userName, Account caller); + boolean removeVpnUser(Account vpnOwner, String userName, Account caller); List listVpnUsers(long vpnOwnerId, String userName); diff --git a/api/src/main/java/com/cloud/region/ha/GlobalLoadBalancingRulesService.java b/api/src/main/java/com/cloud/region/ha/GlobalLoadBalancingRulesService.java index ab6e6fb6c5a..3b61367e3b4 100644 --- a/api/src/main/java/com/cloud/region/ha/GlobalLoadBalancingRulesService.java +++ b/api/src/main/java/com/cloud/region/ha/GlobalLoadBalancingRulesService.java @@ -19,6 +19,7 @@ package com.cloud.region.ha; import java.util.List; +import com.cloud.user.Account; import org.apache.cloudstack.api.command.user.region.ha.gslb.AssignToGlobalLoadBalancerRuleCmd; import org.apache.cloudstack.api.command.user.region.ha.gslb.CreateGlobalLoadBalancerRuleCmd; import org.apache.cloudstack.api.command.user.region.ha.gslb.DeleteGlobalLoadBalancerRuleCmd; @@ -39,7 +40,7 @@ public interface GlobalLoadBalancingRulesService { 
GlobalLoadBalancerRule updateGlobalLoadBalancerRule(UpdateGlobalLoadBalancerRuleCmd updateGslbCmd); - boolean revokeAllGslbRulesForAccount(com.cloud.user.Account caller, long accountId) throws com.cloud.exception.ResourceUnavailableException; + boolean revokeAllGslbRulesForAccount(com.cloud.user.Account caller, Account account) throws com.cloud.exception.ResourceUnavailableException; /* * methods for managing sites participating in global load balancing diff --git a/api/src/main/java/com/cloud/vm/NicProfile.java b/api/src/main/java/com/cloud/vm/NicProfile.java index 183c8dcb2d5..a0c80ceb1bf 100644 --- a/api/src/main/java/com/cloud/vm/NicProfile.java +++ b/api/src/main/java/com/cloud/vm/NicProfile.java @@ -450,6 +450,9 @@ public class NicProfile implements InternalIdentity, Serializable { @Override public String toString() { - return String.format("NicProfile %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "vmId", "deviceId", "broadcastUri", "reservationId", "iPv4Address")); + return String.format("NicProfile %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "vmId", "deviceId", + "broadcastUri", "reservationId", "iPv4Address")); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostCmd.java index 88eeadb9b13..397f9c80735 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostCmd.java @@ -125,8 +125,9 @@ public class UpdateHostCmd extends BaseCmd { hostResponse.setResponseName(getCommandName()); this.setResponseObject(hostResponse); } catch (Exception e) { - logger.debug("Failed to update host:" + getId(), e); - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to update host:" + getId() + "," + e.getMessage()); + Host host = 
_entityMgr.findById(Host.class, getId()); + logger.debug("Failed to update host: {} with id {}", host, getId(), e); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to update host: %s with id %d, %s", host, getId(), e.getMessage())); } } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java index 6bebdc09f59..cdd908dfb87 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java @@ -124,10 +124,10 @@ public class CreateSnapshotFromVMSnapshotCmd extends BaseAsyncCreateCmd { if (account.getType() == Account.Type.PROJECT) { Project project = _projectService.findByProjectAccountId(vmsnapshot.getAccountId()); if (project == null) { - throw new InvalidParameterValueException("Unable to find project by account id=" + account.getUuid()); + throw new InvalidParameterValueException(String.format("Unable to find project by account %s", account)); } if (project.getState() != Project.State.Active) { - throw new PermissionDeniedException("Can't add resources to the project id=" + project.getUuid() + " in state=" + project.getState() + " as it's no longer active"); + throw new PermissionDeniedException(String.format("Can't add resources to the project %s in state=%s as it's no longer active", project, project.getState())); } } else if (account.getState() == Account.State.DISABLED) { throw new PermissionDeniedException("The owner of template is disabled: " + account); @@ -164,8 +164,9 @@ public class CreateSnapshotFromVMSnapshotCmd extends BaseAsyncCreateCmd { @Override public void execute() { - logger.info("CreateSnapshotFromVMSnapshotCmd with vm snapshot id:" + getVMSnapshotId() + " and snapshot id:" + getEntityId() + 
" starts:" + System.currentTimeMillis()); - CallContext.current().setEventDetails("Vm Snapshot Id: "+ this._uuidMgr.getUuid(VMSnapshot.class, getVMSnapshotId())); + VMSnapshot vmSnapshot = _vmSnapshotService.getVMSnapshotById(getVMSnapshotId()); + logger.info("CreateSnapshotFromVMSnapshotCmd with vm snapshot {} with id {} and snapshot [id: {}, uuid: {}]", vmSnapshot, getVMSnapshotId(), getEntityId(), getEntityUuid()); + CallContext.current().setEventDetails("Vm Snapshot Id: " + vmSnapshot.getUuid()); Snapshot snapshot = null; try { snapshot = _snapshotService.backupSnapshotFromVmSnapshot(getEntityId(), getVmId(), getVolumeId(), getVMSnapshotId()); @@ -174,19 +175,19 @@ public class CreateSnapshotFromVMSnapshotCmd extends BaseAsyncCreateCmd { response.setResponseName(getCommandName()); this.setResponseObject(response); } else { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create snapshot due to an internal error creating snapshot from vm snapshot " + getVMSnapshotId()); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to create snapshot due to an internal error creating snapshot from vm snapshot %s", vmSnapshot)); } } catch (InvalidParameterValueException ex) { throw ex; } catch (Exception e) { logger.debug("Failed to create snapshot", e); - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create snapshot due to an internal error creating snapshot from vm snapshot " + getVMSnapshotId()); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to create snapshot due to an internal error creating snapshot from vm snapshot %s", vmSnapshot)); } finally { if (snapshot == null) { try { _snapshotService.deleteSnapshot(getEntityId(), null); } catch (Exception e) { - logger.debug("Failed to clean failed snapshot" + getEntityId()); + logger.debug("Failed to clean failed snapshot {} with id {}", () -> _entityMgr.findById(Snapshot.class, getEntityId()), this::getEntityId); } 
} } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java index 48e7a9ee519..0697987b04d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java @@ -104,7 +104,7 @@ public class RemoveVpnUserCmd extends BaseAsyncCmd { public void execute() { Account owner = _accountService.getAccount(getEntityOwnerId()); long ownerId = owner.getId(); - boolean result = _ravService.removeVpnUser(ownerId, userName, CallContext.current().getCallingAccount()); + boolean result = _ravService.removeVpnUser(owner, userName, CallContext.current().getCallingAccount()); if (!result) { String errorMessage = String.format("Failed to remove VPN user=[%s]. VPN owner id=[%s].", userName, ownerId); logger.error(errorMessage); diff --git a/api/src/main/java/org/apache/cloudstack/cluster/ClusterDrsAlgorithm.java b/api/src/main/java/org/apache/cloudstack/cluster/ClusterDrsAlgorithm.java index 15f7fcd8174..665f95842b0 100644 --- a/api/src/main/java/org/apache/cloudstack/cluster/ClusterDrsAlgorithm.java +++ b/api/src/main/java/org/apache/cloudstack/cluster/ClusterDrsAlgorithm.java @@ -21,6 +21,7 @@ package org.apache.cloudstack.cluster; import com.cloud.host.Host; import com.cloud.offering.ServiceOffering; +import com.cloud.org.Cluster; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; import com.cloud.utils.component.Adapter; @@ -55,8 +56,8 @@ public interface ClusterDrsAlgorithm extends Adapter { * @throws ConfigurationException * if there is an error in the configuration */ - boolean needsDrs(long clusterId, List> cpuList, - List> memoryList) throws ConfigurationException; + boolean needsDrs(Cluster cluster, List> cpuList, + List> memoryList) throws ConfigurationException; /** @@ -79,7 +80,7 @@ public interface ClusterDrsAlgorithm 
extends Adapter { * * @return a ternary containing improvement, cost, benefit */ - Ternary getMetrics(long clusterId, VirtualMachine vm, ServiceOffering serviceOffering, + Ternary getMetrics(Cluster cluster, VirtualMachine vm, ServiceOffering serviceOffering, Host destHost, Map> hostCpuMap, Map> hostMemoryMap, Boolean requiresStorageMotion) throws ConfigurationException; diff --git a/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java b/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java index 5697a040b81..0802098cb4f 100644 --- a/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java +++ b/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java @@ -17,7 +17,7 @@ package org.apache.cloudstack.vm; -import static com.cloud.utils.NumbersUtil.toHumanReadableSize; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import java.util.List; @@ -179,6 +179,13 @@ public class UnmanagedInstanceTO { this.vncPassword = vncPassword; } + @Override + public String toString() { + return String.format("UnmanagedInstanceTO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "name", "internalCSName", "hostName", "clusterName")); + } + public static class Disk { private String diskId; @@ -322,12 +329,9 @@ public class UnmanagedInstanceTO { @Override public String toString() { - return "Disk {" + - "diskId='" + diskId + '\'' + - ", capacity=" + toHumanReadableSize(capacity) + - ", controller='" + controller + '\'' + - ", controllerUnit=" + controllerUnit + - "}"; + return String.format("Disk %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "diskId", "internalCSName", "controller", "controllerUnit")); } } @@ -424,11 +428,9 @@ public class UnmanagedInstanceTO { @Override public String toString() { - return "Nic{" + - "nicId='" + nicId + '\'' + - ", adapterType='" + adapterType + '\'' + - ", macAddress='" + macAddress + '\'' + - "}"; + return 
String.format("Nic %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "nicId", "adapterType", "macAddress")); } } } diff --git a/api/src/test/java/com/cloud/agent/api/to/LoadBalancerTOTest.java b/api/src/test/java/com/cloud/agent/api/to/LoadBalancerTOTest.java index b12c1b81d4a..e7ecbebae7b 100644 --- a/api/src/test/java/com/cloud/agent/api/to/LoadBalancerTOTest.java +++ b/api/src/test/java/com/cloud/agent/api/to/LoadBalancerTOTest.java @@ -41,16 +41,19 @@ public class LoadBalancerTOTest { LoadBalancerTO.AutoScaleVmGroupTO vmGroup; private static final Long counterId = 1L; + private static final String counterUuid = "1111-1111-1100"; private static final String counterName = "counter name"; private static final Counter.Source counterSource = Counter.Source.CPU; private static final String counterValue = "counter value"; private static final String counterProvider = "VIRTUALROUTER"; private static final Long conditionId = 2L; + private static final String conditionUuid = "1111-1111-1110"; private static final Long threshold = 100L; private static final Condition.Operator relationalOperator = Condition.Operator.GT; private static final Long scaleUpPolicyId = 11L; + private static final String scaleUpPolicyUuid = "1111-1111-1111"; private static final int scaleUpPolicyDuration = 61; private static final int scaleUpPolicyQuietTime = 31; private static final Date scaleUpPolicyLastQuietTime = new Date(); @@ -85,14 +88,14 @@ public class LoadBalancerTOTest { @Before public void setUp() { - counter = new LoadBalancerTO.CounterTO(counterId, counterName, counterSource, counterValue, counterProvider); - condition = new LoadBalancerTO.ConditionTO(conditionId, threshold, relationalOperator, counter); - scaleUpPolicy = new LoadBalancerTO.AutoScalePolicyTO(scaleUpPolicyId, scaleUpPolicyDuration, scaleUpPolicyQuietTime, - scaleUpPolicyLastQuietTime, AutoScalePolicy.Action.SCALEUP, - Arrays.asList(new LoadBalancerTO.ConditionTO[]{ condition }), false); - 
scaleDownPolicy = new LoadBalancerTO.AutoScalePolicyTO(scaleDownPolicyId, scaleDownPolicyDuration, scaleDownPolicyQuietTime, - scaleDownPolicyLastQuietTime, AutoScalePolicy.Action.SCALEDOWN, - Arrays.asList(new LoadBalancerTO.ConditionTO[]{ condition }), false); + counter = new LoadBalancerTO.CounterTO(counterId, counterUuid, counterName, counterSource, counterValue, counterProvider); + condition = new LoadBalancerTO.ConditionTO(conditionId, conditionUuid, threshold, relationalOperator, counter); + scaleUpPolicy = new LoadBalancerTO.AutoScalePolicyTO(scaleUpPolicyId, scaleUpPolicyUuid, scaleUpPolicyDuration, + scaleUpPolicyQuietTime, scaleUpPolicyLastQuietTime, + AutoScalePolicy.Action.SCALEUP, Arrays.asList(new LoadBalancerTO.ConditionTO[]{ condition }), false); + scaleDownPolicy = new LoadBalancerTO.AutoScalePolicyTO(scaleDownPolicyId, scaleUpPolicyUuid, scaleDownPolicyDuration, + scaleDownPolicyQuietTime, scaleDownPolicyLastQuietTime, + AutoScalePolicy.Action.SCALEDOWN, Arrays.asList(new LoadBalancerTO.ConditionTO[]{ condition }), false); vmProfile = new LoadBalancerTO.AutoScaleVmProfileTO(zoneId, domainId, cloudStackApiUrl, autoScaleUserApiKey, autoScaleUserSecretKey, serviceOfferingId, templateId, vmName, networkId, otherDeployParams, counterParamList, expungeVmGracePeriod); @@ -113,6 +116,7 @@ public class LoadBalancerTOTest { @Test public void testConditionTO() { Assert.assertEquals(conditionId, condition.getId()); + Assert.assertEquals(conditionUuid, condition.getUuid()); Assert.assertEquals((long) threshold, condition.getThreshold()); Assert.assertEquals(relationalOperator, condition.getRelationalOperator()); Assert.assertEquals(counter, condition.getCounter()); diff --git a/core/src/main/java/com/cloud/agent/api/ReadyCommand.java b/core/src/main/java/com/cloud/agent/api/ReadyCommand.java index 42f1d264a50..e2d974e3878 100644 --- a/core/src/main/java/com/cloud/agent/api/ReadyCommand.java +++ b/core/src/main/java/com/cloud/agent/api/ReadyCommand.java @@ 
-19,6 +19,8 @@ package com.cloud.agent.api; +import com.cloud.host.Host; + import java.util.List; public class ReadyCommand extends Command { @@ -30,6 +32,8 @@ public class ReadyCommand extends Command { private Long dcId; private Long hostId; + private String hostUuid; + private String hostName; private List msHostList; private String lbAlgorithm; private Long lbCheckInterval; @@ -41,9 +45,11 @@ public class ReadyCommand extends Command { this.dcId = dcId; } - public ReadyCommand(final Long dcId, final Long hostId, boolean enableHumanReadableSizes) { - this(dcId); - this.hostId = hostId; + public ReadyCommand(final Host host, boolean enableHumanReadableSizes) { + this(host.getDataCenterId()); + this.hostId = host.getId(); + this.hostUuid = host.getUuid(); + this.hostName = host.getName(); this.enableHumanReadableSizes = enableHumanReadableSizes; } @@ -68,6 +74,14 @@ public class ReadyCommand extends Command { return hostId; } + public String getHostUuid() { + return hostUuid; + } + + public String getHostName() { + return hostName; + } + public List getMsHostList() { return msHostList; } diff --git a/core/src/main/java/com/cloud/agent/api/StartupAnswer.java b/core/src/main/java/com/cloud/agent/api/StartupAnswer.java index 71652269b66..c619ce75ace 100644 --- a/core/src/main/java/com/cloud/agent/api/StartupAnswer.java +++ b/core/src/main/java/com/cloud/agent/api/StartupAnswer.java @@ -21,14 +21,18 @@ package com.cloud.agent.api; public class StartupAnswer extends Answer { long hostId; + String hostName; + String hostUuid; int pingInterval; protected StartupAnswer() { } - public StartupAnswer(StartupCommand cmd, long hostId, int pingInterval) { + public StartupAnswer(StartupCommand cmd, long hostId, String hostUuid, String hostName, int pingInterval) { super(cmd); this.hostId = hostId; + this.hostUuid = hostUuid; + this.hostName = hostName; this.pingInterval = pingInterval; } @@ -40,6 +44,14 @@ public class StartupAnswer extends Answer { return hostId; } + public 
String getHostUuid() { + return hostUuid; + } + + public String getHostName() { + return hostName; + } + public int getPingInterval() { return pingInterval; } diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/ImageStoreTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/ImageStoreTO.java index 046a2ab9410..4bf29205673 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/ImageStoreTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/ImageStoreTO.java @@ -23,6 +23,7 @@ import org.apache.cloudstack.storage.image.datastore.ImageStoreInfo; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.storage.DataStoreRole; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class ImageStoreTO implements DataStoreTO { private String type; @@ -78,15 +79,9 @@ public class ImageStoreTO implements DataStoreTO { @Override public String toString() { - return new StringBuilder("ImageStoreTO[type=").append(type) - .append("|provider=") - .append(providerName) - .append("|role=") - .append(role) - .append("|uri=") - .append(uri) - .append("]") - .toString(); + return String.format("ImageStoreTO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "uuid", "type", "providerName", "role", "uri")); } @Override diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java index a6a74176c13..2c758fa5087 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java @@ -26,6 +26,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.storage.DataStoreRole; import com.cloud.storage.Storage.StoragePoolType; +import 
org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class PrimaryDataStoreTO implements DataStoreTO { public static final String MANAGED = PrimaryDataStore.MANAGED; @@ -145,15 +146,9 @@ public class PrimaryDataStoreTO implements DataStoreTO { @Override public String toString() { - return new StringBuilder("PrimaryDataStoreTO[uuid=").append(uuid) - .append("|name=") - .append(name) - .append("|id=") - .append(id) - .append("|pooltype=") - .append(poolType) - .append("]") - .toString(); + return String.format("PrimaryDataStoreTO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "poolType")); } public Boolean isFullCloneFlag() { diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java index eafe8f83269..dc68b31a3fd 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java @@ -27,6 +27,7 @@ import com.cloud.agent.api.to.DataTO; import com.cloud.hypervisor.Hypervisor; import com.cloud.storage.Storage.ImageFormat; import com.cloud.template.VirtualMachineTemplate; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class TemplateObjectTO extends DownloadableObjectTO implements DataTO { private String path; @@ -264,6 +265,8 @@ public class TemplateObjectTO extends DownloadableObjectTO implements DataTO { @Override public String toString() { - return new StringBuilder("TemplateTO[id=").append(id).append("|origUrl=").append(origUrl).append("|name").append(name).append("]").toString(); + return String.format("TemplateTO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "origUrl")); } } diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java 
b/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java index 6514038ac62..4d1d0bf9097 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java @@ -30,6 +30,7 @@ import com.cloud.offering.DiskOffering.DiskCacheMode; import com.cloud.storage.MigrationOptions; import com.cloud.storage.Storage; import com.cloud.storage.Volume; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import java.util.Arrays; @@ -258,7 +259,9 @@ public class VolumeObjectTO extends DownloadableObjectTO implements DataTO { @Override public String toString() { - return new StringBuilder("volumeTO[uuid=").append(uuid).append("|path=").append(path).append("|datastore=").append(dataStore).append("]").toString(); + return String.format("volumeTO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "path", "dataStore")); } public void setBytesReadRate(Long bytesReadRate) { diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/EndPoint.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/EndPoint.java index 254c91d3544..df78928ddc3 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/EndPoint.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/EndPoint.java @@ -24,6 +24,8 @@ import com.cloud.agent.api.Command; public interface EndPoint { long getId(); + String getUuid(); + String getHostAddr(); String getPublicAddr(); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/HypervisorHostListener.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/HypervisorHostListener.java index 6ac4030e1a6..6b9a48b5a53 100644 --- 
a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/HypervisorHostListener.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/HypervisorHostListener.java @@ -19,12 +19,22 @@ package org.apache.cloudstack.engine.subsystem.api.storage; import com.cloud.exception.StorageConflictException; +import com.cloud.host.Host; +import com.cloud.storage.StoragePool; public interface HypervisorHostListener { boolean hostAdded(long hostId); + default boolean hostConnect(Host host, StoragePool pool) throws StorageConflictException { + return hostConnect(host.getId(), pool.getId()); + } + boolean hostConnect(long hostId, long poolId) throws StorageConflictException; + default boolean hostDisconnected(Host host, StoragePool pool) { + return hostDisconnected(host.getId(), pool.getId()); + } + boolean hostDisconnected(long hostId, long poolId); boolean hostAboutToBeRemoved(long hostId); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java index 3bd3100e84e..1f7bf45a15a 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java @@ -22,6 +22,8 @@ import com.cloud.template.VirtualMachineTemplate; import com.cloud.user.UserData; public interface TemplateInfo extends DownloadableDataInfo, VirtualMachineTemplate { + VirtualMachineTemplate getImage(); + @Override String getUniqueName(); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java index df13f951a44..115cf024617 100644 --- 
a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.engine.subsystem.api.storage; import com.cloud.agent.api.to.DatadiskTO; +import com.cloud.template.VirtualMachineTemplate; import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.storage.command.CommandResult; @@ -60,7 +61,7 @@ public interface TemplateService { AsyncCallFuture deleteTemplateOnPrimary(TemplateInfo template, StoragePool pool); - void syncTemplateToRegionStore(long templateId, DataStore store); + void syncTemplateToRegionStore(VirtualMachineTemplate template, DataStore store); void handleSysTemplateDownload(HypervisorType hostHyper, Long dcId); diff --git a/engine/components-api/src/main/java/com/cloud/agent/Listener.java b/engine/components-api/src/main/java/com/cloud/agent/Listener.java index 843a634b4c0..ceba5f34b82 100644 --- a/engine/components-api/src/main/java/com/cloud/agent/Listener.java +++ b/engine/components-api/src/main/java/com/cloud/agent/Listener.java @@ -43,6 +43,10 @@ public interface Listener { */ boolean processAnswers(long agentId, long seq, Answer[] answers); + default boolean processAnswers(long agentId, String uuid, String name, long seq, Answer[] answers) { + return processAnswers(agentId, seq, answers); + } + /** * This method is called by the AgentManager when an agent sent * a command to the server. In order to process these commands, * @@ -92,6 +96,10 @@ public interface Listener { */ boolean processDisconnect(long agentId, Status state); + default boolean processDisconnect(long agentId, String uuid, String name, Status state) { + return processDisconnect(agentId, state); + } + /** * This method is called by AgentManager when a host is about to be removed from a cluster. 
* @param long the ID of the host that's about to be removed diff --git a/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java b/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java index e1bb10f5d26..cbd137e8682 100644 --- a/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java +++ b/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java @@ -140,7 +140,7 @@ public interface CapacityManager { * @param ram required RAM * @param cpuOverprovisioningFactor factor to apply to the actual host cpu */ - boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOverprovisioningFactor, float memoryOvercommitRatio, + boolean checkIfHostHasCapacity(Host host, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOverprovisioningFactor, float memoryOvercommitRatio, boolean considerReservedCapacity); void updateCapacityForHost(Host host); diff --git a/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java b/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java index 01fd54430d6..1694b19c33f 100644 --- a/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java +++ b/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java @@ -238,7 +238,7 @@ public interface ConfigurationManager { * @param domainId * @return success/failure */ - boolean releaseDomainSpecificVirtualRanges(long domainId); + boolean releaseDomainSpecificVirtualRanges(Domain domain); /** * Release dedicated virtual ip ranges of an account. @@ -246,7 +246,7 @@ public interface ConfigurationManager { * @param accountId * @return success/failure */ - boolean releaseAccountSpecificVirtualRanges(long accountId); + boolean releaseAccountSpecificVirtualRanges(Account account); /** * Edits a pod in the database. 
Will not allow you to edit pods that are being used anywhere in the system. diff --git a/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java b/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java index 36937460b20..b1cad20b19e 100644 --- a/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java +++ b/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java @@ -19,6 +19,7 @@ package com.cloud.network; import java.util.Date; import java.util.List; +import com.cloud.user.User; import org.apache.cloudstack.api.response.AcquirePodIpCmdResponse; import org.apache.cloudstack.framework.config.ConfigKey; @@ -88,7 +89,7 @@ public interface IpAddressManager { * @param caller * @return true if it did; false if it didn't */ - boolean disassociatePublicIpAddress(long id, long userId, Account caller); + boolean disassociatePublicIpAddress(IpAddress ipAddress, long userId, Account caller); boolean applyRules(List rules, FirewallRule.Purpose purpose, NetworkRuleApplier applier, boolean continueOnError) throws ResourceUnavailableException; @@ -191,7 +192,7 @@ public interface IpAddressManager { PublicIp assignDedicateIpAddress(Account owner, Long guestNtwkId, Long vpcId, long dcId, boolean isSourceNat) throws ConcurrentOperationException, InsufficientAddressCapacityException; - IpAddress allocateIp(Account ipOwner, boolean isSystem, Account caller, long callerId, DataCenter zone, Boolean displayIp, String ipaddress) + IpAddress allocateIp(Account ipOwner, boolean isSystem, Account caller, User callerId, DataCenter zone, Boolean displayIp, String ipaddress) throws ConcurrentOperationException, ResourceAllocationException, InsufficientAddressCapacityException; PublicIp assignPublicIpAddressFromVlans(long dcId, Long podId, Account owner, VlanType type, List vlanDbIds, Long networkId, String requestedIp, String requestedGateway, boolean isSystem) diff --git 
a/engine/components-api/src/main/java/com/cloud/network/lb/LoadBalancingRulesManager.java b/engine/components-api/src/main/java/com/cloud/network/lb/LoadBalancingRulesManager.java index d61b446cad7..669456cbdcc 100644 --- a/engine/components-api/src/main/java/com/cloud/network/lb/LoadBalancingRulesManager.java +++ b/engine/components-api/src/main/java/com/cloud/network/lb/LoadBalancingRulesManager.java @@ -62,7 +62,7 @@ public interface LoadBalancingRulesManager { */ boolean removeVmFromLoadBalancers(long vmId); - boolean applyLoadBalancersForNetwork(long networkId, Scheme scheme) throws ResourceUnavailableException; + boolean applyLoadBalancersForNetwork(Network network, Scheme scheme) throws ResourceUnavailableException; String getLBCapability(long networkid, String capabilityName); @@ -74,7 +74,7 @@ public interface LoadBalancingRulesManager { boolean configureLbAutoScaleVmGroup(long vmGroupid, AutoScaleVmGroup.State currentState) throws ResourceUnavailableException; - boolean revokeLoadBalancersForNetwork(long networkId, Scheme scheme) throws ResourceUnavailableException; + boolean revokeLoadBalancersForNetwork(Network network, Scheme scheme) throws ResourceUnavailableException; boolean validateLbRule(LoadBalancingRule lbRule); diff --git a/engine/components-api/src/main/java/com/cloud/network/rules/FirewallManager.java b/engine/components-api/src/main/java/com/cloud/network/rules/FirewallManager.java index 0471086c43d..1a79135f25e 100644 --- a/engine/components-api/src/main/java/com/cloud/network/rules/FirewallManager.java +++ b/engine/components-api/src/main/java/com/cloud/network/rules/FirewallManager.java @@ -20,6 +20,8 @@ import java.util.List; import com.cloud.exception.NetworkRuleConflictException; import com.cloud.exception.ResourceUnavailableException; +import com.cloud.network.IpAddress; +import com.cloud.network.Network; import com.cloud.network.dao.IPAddressVO; import com.cloud.network.firewall.FirewallService; import 
com.cloud.network.rules.FirewallRule.FirewallRuleType; @@ -53,7 +55,7 @@ public interface FirewallManager extends FirewallService { public void revokeRule(FirewallRuleVO rule, Account caller, long userId, boolean needUsageEvent); - boolean revokeFirewallRulesForIp(long ipId, long userId, Account caller) throws ResourceUnavailableException; + boolean revokeFirewallRulesForIp(IpAddress ip, long userId, Account caller) throws ResourceUnavailableException; // /** // * Revokes a firewall rule @@ -75,7 +77,7 @@ public interface FirewallManager extends FirewallService { FirewallRule createRuleForAllCidrs(long ipAddrId, Account caller, Integer startPort, Integer endPort, String protocol, Integer icmpCode, Integer icmpType, Long relatedRuleId, long networkId) throws NetworkRuleConflictException; - boolean revokeAllFirewallRulesForNetwork(long networkId, long userId, Account caller) throws ResourceUnavailableException; + boolean revokeAllFirewallRulesForNetwork(Network network, long userId, Account caller) throws ResourceUnavailableException; boolean revokeFirewallRulesForVm(long vmId); diff --git a/engine/components-api/src/main/java/com/cloud/network/rules/RulesManager.java b/engine/components-api/src/main/java/com/cloud/network/rules/RulesManager.java index c77874329fc..79ffdfdb973 100644 --- a/engine/components-api/src/main/java/com/cloud/network/rules/RulesManager.java +++ b/engine/components-api/src/main/java/com/cloud/network/rules/RulesManager.java @@ -22,6 +22,7 @@ import com.cloud.exception.InsufficientAddressCapacityException; import com.cloud.exception.NetworkRuleConflictException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.IpAddress; +import com.cloud.network.Network; import com.cloud.user.Account; import com.cloud.uservm.UserVm; import com.cloud.vm.Nic; @@ -47,7 +48,7 @@ public interface RulesManager extends RulesService { FirewallRule[] reservePorts(IpAddress ip, String protocol, FirewallRule.Purpose purpose, boolean 
openFirewall, Account caller, int... ports) throws NetworkRuleConflictException; - boolean applyStaticNatsForNetwork(long networkId, boolean continueOnError, Account caller); + boolean applyStaticNatsForNetwork(Network network, boolean continueOnError, Account caller); void getSystemIpAndEnableStaticNatForVm(VirtualMachine vm, boolean getNewIp) throws InsufficientAddressCapacityException; @@ -60,7 +61,7 @@ public interface RulesManager extends RulesService { * @param forRevoke * @return */ - boolean applyStaticNatForNetwork(long networkId, boolean continueOnError, Account caller, boolean forRevoke); + boolean applyStaticNatForNetwork(Network network, boolean continueOnError, Account caller, boolean forRevoke); List listAssociatedRulesForGuestNic(Nic nic); diff --git a/engine/components-api/src/main/java/com/cloud/network/security/SecurityGroupManager.java b/engine/components-api/src/main/java/com/cloud/network/security/SecurityGroupManager.java index ffca4bb013b..6e2270ffb10 100644 --- a/engine/components-api/src/main/java/com/cloud/network/security/SecurityGroupManager.java +++ b/engine/components-api/src/main/java/com/cloud/network/security/SecurityGroupManager.java @@ -19,6 +19,7 @@ package com.cloud.network.security; import java.util.HashMap; import java.util.List; +import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; /** @@ -36,9 +37,9 @@ public interface SecurityGroupManager { public SecurityGroupVO createDefaultSecurityGroup(Long accountId); - public boolean addInstanceToGroups(Long userVmId, List groups); + public boolean addInstanceToGroups(UserVm userVm, List groups); - public void removeInstanceFromGroups(long userVmId); + public void removeInstanceFromGroups(UserVm userVm); public void fullSync(long agentId, HashMap> newGroupStates); diff --git a/engine/components-api/src/main/java/com/cloud/network/vpc/VpcManager.java b/engine/components-api/src/main/java/com/cloud/network/vpc/VpcManager.java index a340f49c13f..15158b72fab 100644 --- 
a/engine/components-api/src/main/java/com/cloud/network/vpc/VpcManager.java +++ b/engine/components-api/src/main/java/com/cloud/network/vpc/VpcManager.java @@ -20,6 +20,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import com.cloud.network.dao.IPAddressVO; import com.cloud.utils.Pair; import org.apache.cloudstack.acl.ControlledEntity.ACLType; @@ -82,6 +83,8 @@ public interface VpcManager { */ void unassignIPFromVpcNetwork(long ipId, long networkId); + void unassignIPFromVpcNetwork(final IPAddressVO ip, final Network network); + /** * Creates guest network in the VPC * diff --git a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java index b2ae8b89837..343ad0fa212 100755 --- a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java +++ b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java @@ -185,7 +185,7 @@ public interface ResourceManager extends ResourceService, Configurable { * @param vgpuType the VGPU type * @return true when the host has the capacity with given VGPU type */ - boolean isGPUDeviceAvailable(long hostId, String groupName, String vgpuType); + boolean isGPUDeviceAvailable(Host host, String groupName, String vgpuType); /** * Get available GPU device diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index b5153668899..0b9f7bcb7db 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -365,9 +365,9 @@ public interface StorageManager extends StorageService { String getStoragePoolMountFailureReason(String error); - boolean connectHostToSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException; + boolean 
connectHostToSharedPool(Host host, long poolId) throws StorageUnavailableException, StorageConflictException; - void disconnectHostFromSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException; + void disconnectHostFromSharedPool(Host host, StoragePool pool) throws StorageUnavailableException, StorageConflictException; void enableHost(long hostId) throws StorageUnavailableException, StorageConflictException; diff --git a/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java b/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java index 997ae3985f1..b8912526fdf 100644 --- a/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java +++ b/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java @@ -120,7 +120,7 @@ public interface TemplateManager { DataStore getImageStore(long tmpltId); - Long getTemplateSize(long templateId, long zoneId); + Long getTemplateSize(VirtualMachineTemplate template, long zoneId); DataStore getImageStore(String storeUuid, Long zoneId, VolumeVO volume); @@ -143,7 +143,7 @@ public interface TemplateManager { TemplateType validateTemplateType(BaseCmd cmd, boolean isAdmin, boolean isCrossZones); - List getTemplateDisksOnImageStore(Long templateId, DataStoreRole role, String configurationId); + List getTemplateDisksOnImageStore(VirtualMachineTemplate template, DataStoreRole role, String configurationId); static Boolean getValidateUrlIsResolvableBeforeRegisteringTemplateValue() { return ValidateUrlIsResolvableBeforeRegisteringTemplate.value(); diff --git a/engine/components-api/src/main/java/com/cloud/vm/VmWorkJobHandlerProxy.java b/engine/components-api/src/main/java/com/cloud/vm/VmWorkJobHandlerProxy.java index c82edc70ded..ef1c71e6b01 100644 --- a/engine/components-api/src/main/java/com/cloud/vm/VmWorkJobHandlerProxy.java +++ b/engine/components-api/src/main/java/com/cloud/vm/VmWorkJobHandlerProxy.java @@ -26,9 +26,7 @@ 
import org.apache.cloudstack.jobs.JobInfo; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import com.cloud.serializer.GsonHelper; import com.cloud.utils.Pair; -import com.google.gson.Gson; /** * VmWorkJobHandlerProxy can not be used as standalone due to run-time @@ -44,10 +42,8 @@ public class VmWorkJobHandlerProxy implements VmWorkJobHandler { private Object _target; private Map, Method> _handlerMethodMap = new HashMap, Method>(); - private Gson _gsonLogger; public VmWorkJobHandlerProxy(Object target) { - _gsonLogger = GsonHelper.getGsonLogger(); buildLookupMap(target.getClass()); _target = target; @@ -123,10 +119,10 @@ public class VmWorkJobHandlerProxy implements VmWorkJobHandler { throw e; } } else { - logger.error("Unable to find handler for VM work job: " + work.getClass().getName() + _gsonLogger.toJson(work)); + logger.error("Unable to find handler for VM work job: {} {}", work.getClass().getName(), work); RuntimeException ex = new RuntimeException("Unable to find handler for VM work job: " + work.getClass().getName()); - return new Pair(JobInfo.Status.FAILED, JobSerializerHelper.toObjectSerializedString(ex)); + return new Pair<>(JobInfo.Status.FAILED, JobSerializerHelper.toObjectSerializedString(ex)); } } } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java index c0f7586aee0..30a58d405c9 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java @@ -34,6 +34,7 @@ import java.util.concurrent.TimeUnit; import com.cloud.agent.api.CleanupPersistentNetworkResourceCommand; import org.apache.cloudstack.agent.lb.SetupMSListCommand; import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; 
import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -111,6 +112,7 @@ public abstract class AgentAttache { protected static String LOG_SEQ_FORMATTED_STRING; protected final long _id; + protected String _uuid; protected String _name = null; protected final ConcurrentHashMap _waitForList; protected final LinkedList _requests; @@ -133,8 +135,9 @@ public abstract class AgentAttache { Arrays.sort(s_commandsNotAllowedInConnectingMode); } - protected AgentAttache(final AgentManagerImpl agentMgr, final long id, final String name, final boolean maintenance) { + protected AgentAttache(final AgentManagerImpl agentMgr, final long id, final String uuid, final String name, final boolean maintenance) { _id = id; + _uuid = uuid; _name = name; _waitForList = new ConcurrentHashMap(); _currentSequence = null; @@ -145,6 +148,13 @@ public abstract class AgentAttache { LOG_SEQ_FORMATTED_STRING = String.format("Seq %d-{}: {}", _id); } + @Override + public String toString() { + return String.format("AgentAttache %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "_id", "_uuid", "_name")); + } + public synchronized long getNextSequence() { return ++_nextSequence; } @@ -206,7 +216,7 @@ public abstract class AgentAttache { logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Cancelling."); final Listener listener = _waitForList.remove(seq); if (listener != null) { - listener.processDisconnect(_id, Status.Disconnected); + listener.processDisconnect(_id, _uuid, _name, Status.Disconnected); } int index = findRequest(seq); if (index >= 0) { @@ -243,6 +253,10 @@ public abstract class AgentAttache { return _id; } + public String getUuid() { + return _uuid; + } + public String getName() { return _name; } @@ -316,7 +330,7 @@ public abstract class AgentAttache { it.remove(); final Listener monitor = entry.getValue(); logger.debug(LOG_SEQ_FORMATTED_STRING, entry.getKey(), "Sending disconnect to " + monitor.getClass()); - monitor.processDisconnect(_id, 
state); + monitor.processDisconnect(_id, _uuid, _name, state); } } } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java index 27b3ac2d751..9333410e0aa 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java @@ -302,7 +302,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } - logger.warn("No handling of agent control command: {} sent from {}", cmd, attache.getId()); + logger.warn("No handling of agent control command: {} sent from {}", cmd, attache); return new AgentControlAnswer(cmd); } @@ -344,7 +344,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl answer = easySend(targetHostId, cmd); } catch (final Exception e) { String errorMsg = String.format("Error sending command %s to host %s, due to %s", cmd.getClass().getName(), - host.getUuid(), e.getLocalizedMessage()); + host, e.getLocalizedMessage()); logger.error(errorMsg); logger.debug(errorMsg, e); } @@ -464,11 +464,11 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl final Long hostId = agent.getId(); final HostVO host = _hostDao.findById(hostId); if (host != null && host.getType() != null && !host.getType().isVirtual()) { - logger.debug("Checking if agent ({}) is alive", hostId); + logger.debug("Checking if agent ({}) is alive", host); final Answer answer = easySend(hostId, new CheckHealthCommand()); if (answer != null && answer.getResult()) { final Status status = Status.Up; - logger.debug("Agent ({}) responded to checkHealthCommand, reporting that agent is {}", hostId, status); + logger.debug("Agent ({}) responded to checkHealthCommand, reporting that agent is {}", host, status); return status; } return _haMgr.investigate(hostId); @@ -493,7 +493,9 @@ public 
class AgentManagerImpl extends ManagerBase implements AgentManager, Handl public long send(final Long hostId, final Commands commands, final Listener listener) throws AgentUnavailableException { final AgentAttache agent = getAttache(hostId); if (agent.isClosed()) { - throw new AgentUnavailableException("Agent " + agent.getId() + " is closed", agent.getId()); + throw new AgentUnavailableException(String.format( + "Agent [id: %d, name: %s] is closed", + agent.getId(), agent.getName()), agent.getId()); } final Command[] cmds = checkForCommandsAndTag(commands); @@ -510,7 +512,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl return; } final long hostId = attache.getId(); - logger.debug("Remove Agent : {}", hostId); + logger.debug("Remove Agent : {}", attache); AgentAttache removed = null; boolean conflict = false; synchronized (_agents) { @@ -522,7 +524,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } if (conflict) { - logger.debug("Agent for host {} is created when it is being disconnected", hostId); + logger.debug("Agent for host {} is created when it is being disconnected", attache); } if (removed != null) { removed.disconnect(nextState); @@ -530,7 +532,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl for (final Pair monitor : _hostMonitors) { logger.debug("Sending Disconnect to listener: {}", monitor.second().getClass().getName()); - monitor.second().processDisconnect(hostId, nextState); + monitor.second().processDisconnect(hostId, attache.getUuid(), attache.getName(), nextState); } } @@ -555,28 +557,31 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl if (e instanceof ConnectionException) { final ConnectionException ce = (ConnectionException)e; if (ce.isSetupError()) { - logger.warn("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + 
e.getMessage()); + logger.warn("Monitor {} says there is an error in the connect process for {} due to {}", + monitor.second().getClass().getSimpleName(), host, e.getMessage()); handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); throw ce; } else { - logger.info("Monitor " + monitor.second().getClass().getSimpleName() + " says not to continue the connect process for " + hostId + " due to " + e.getMessage()); + logger.info("Monitor {} says not to continue the connect process for {} due to {}", + monitor.second().getClass().getSimpleName(), host, e.getMessage()); handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true); return attache; } } else if (e instanceof HypervisorVersionChangedException) { handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true); - throw new CloudRuntimeException("Unable to connect " + attache.getId(), e); + throw new CloudRuntimeException(String.format("Unable to connect %s", attache), e); } else { - logger.error("Monitor {} says there is an error in the connect process for {} due to {}", monitor.second().getClass().getSimpleName(), hostId, e.getMessage(), e); + logger.error("Monitor {} says there is an error in the connect process for {} due to {}", + monitor.second().getClass().getSimpleName(), host, e.getMessage(), e); handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); - throw new CloudRuntimeException("Unable to connect " + attache.getId(), e); + throw new CloudRuntimeException(String.format("Unable to connect %s", attache), e); } } } } final Long dcId = host.getDataCenterId(); - final ReadyCommand ready = new ReadyCommand(dcId, host.getId(), NumbersUtil.enableHumanReadableSizes); + final ReadyCommand ready = new ReadyCommand(host, NumbersUtil.enableHumanReadableSizes); ready.setWait(ReadyCommandWait.value()); final Answer answer = easySend(hostId, ready); if (answer == null || !answer.getResult()) { @@ -590,7 +595,7 @@ 
public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl Map detailsMap = readyAnswer.getDetailsMap(); if (detailsMap != null) { String uefiEnabled = detailsMap.get(Host.HOST_UEFI_ENABLE); - logger.debug("Got HOST_UEFI_ENABLE [{}] for hostId [{}]:", uefiEnabled, host.getUuid()); + logger.debug("Got HOST_UEFI_ENABLE [{}] for host [{}]:", uefiEnabled, host); if (uefiEnabled != null) { _hostDao.loadDetails(host); if (!uefiEnabled.equals(host.getDetails().get(Host.HOST_UEFI_ENABLE))) { @@ -707,14 +712,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl // load the respective discoverer final Discoverer discoverer = _resourceMgr.getMatchingDiscover(host.getHypervisorType()); if (discoverer == null) { - logger.info("Could not to find a Discoverer to load the resource: {} for hypervisor type: {}", host.getId(), host.getHypervisorType()); + logger.info("Could not to find a Discoverer to load the resource: {} for hypervisor type: {}", host, host.getHypervisorType()); resource = loadResourcesWithoutHypervisor(host); } else { resource = discoverer.reloadResource(host); } if (resource == null) { - logger.warn("Unable to load the resource: {}", host.getId()); + logger.warn("Unable to load the resource: {}", host); return false; } @@ -734,14 +739,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl return h == null ? 
false : true; } else { - _executor.execute(new SimulateStartTask(host.getId(), resource, host.getDetails())); + _executor.execute(new SimulateStartTask(host.getId(), host.getUuid(), host.getName(), resource, host.getDetails())); return true; } } protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) throws ConnectionException { - logger.debug("create DirectAgentAttache for {}", host.getId()); - final DirectAgentAttache attache = new DirectAgentAttache(this, host.getId(), host.getName(), resource, host.isInMaintenanceStates()); + logger.debug("create DirectAgentAttache for {}", host); + final DirectAgentAttache attache = new DirectAgentAttache(this, host.getId(), host.getUuid(), host.getName(), resource, host.isInMaintenanceStates()); AgentAttache old = null; synchronized (_agents) { @@ -766,7 +771,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl for (final AgentAttache agent : _agents.values()) { final HostVO host = _hostDao.findById(agent.getId()); if (host == null) { - logger.debug("Cant not find host {}", agent.getId()); + logger.debug("Cannot find host {}", agent); } else { if (!agent.forForward()) { agentStatusTransitTo(host, Event.ManagementServerDown, _nodeId); @@ -784,17 +789,17 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl final Status currentStatus = host.getStatus(); Status nextStatus; if (currentStatus == Status.Down || currentStatus == Status.Alert || currentStatus == Status.Removed) { - logger.debug("Host {} is already {}", host.getUuid(), currentStatus); + logger.debug("Host {} is already {}", host, currentStatus); nextStatus = currentStatus; } else { try { nextStatus = currentStatus.getNextStatus(event); } catch (final NoTransitionException e) { - final String err = String.format("Cannot find next status for %s as current status is %s for agent %s", event, currentStatus, host.getUuid()); + final String err = String.format("Cannot 
find next status for %s as current status is %s for agent %s", event, currentStatus, host); logger.debug(err); throw new CloudRuntimeException(err); } - logger.debug("The next status of agent {} is {}, current status is {}", host.getUuid(), nextStatus, currentStatus); + logger.debug("The next status of agent {} is {}, current status is {}", host, nextStatus, currentStatus); } return nextStatus; } @@ -806,17 +811,18 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl GlobalLock joinLock = getHostJoinLock(hostId); if (joinLock.lock(60)) { try { - logger.info("Host {} is disconnecting with event {}", hostId, event); + logger.info("Host {} is disconnecting with event {}", + attache, event); Status nextStatus = null; final HostVO host = _hostDao.findById(hostId); if (host == null) { - logger.warn("Can't find host with {}", hostId); + logger.warn("Can't find host with {} ({})", hostId, attache); nextStatus = Status.Removed; } else { nextStatus = getNextStatusOnDisconnection(host, event); caService.purgeHostCertificate(host); } - logger.debug("Deregistering link for {} with state {}", hostId, nextStatus); + logger.debug("Deregistering link for {} with state {}", attache, nextStatus); removeAgent(attache, nextStatus); @@ -851,28 +857,30 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl if (nextStatus == Status.Alert) { /* OK, we are going to the bad status, let's see what happened */ - logger.info("Investigating why host {} has disconnected with event", hostId, event); + logger.info("Investigating why host {} has disconnected with event", host, event); Status determinedState = investigate(attache); // if state cannot be determined do nothing and bail out if (determinedState == null) { if ((System.currentTimeMillis() >> 10) - host.getLastPinged() > AlertWait.value()) { - logger.warn("Agent {} state cannot be determined for more than {}({}) seconds, will go to Alert state", hostId, AlertWait, 
AlertWait.value()); + logger.warn("Agent {} state cannot be determined for more than {} ({}) seconds, will go to Alert state", + host, AlertWait, AlertWait.value()); determinedState = Status.Alert; } else { - logger.warn("Agent {} state cannot be determined, do nothing", hostId); + logger.warn("Agent {} state cannot be determined, do nothing", host); return false; } } final Status currentStatus = host.getStatus(); - logger.info("The agent from host {} state determined is {}", hostId, determinedState); + logger.info("The agent from host {} state determined is {}", host, determinedState); if (determinedState == Status.Down) { - final String message = "Host is down: " + host.getId() + "-" + host.getName() + ". Starting HA on the VMs"; + final String message = String.format("Host %s is down. Starting HA on the VMs", host); logger.error(message); if (host.getType() != Host.Type.SecondaryStorage && host.getType() != Host.Type.ConsoleProxy) { - _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "Host down, " + host.getId(), message); + _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), + host.getPodId(), String.format("Host down, %s", host), message); } event = Status.Event.HostDown; } else if (determinedState == Status.Up) { @@ -881,21 +889,20 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl agentStatusTransitTo(host, Status.Event.Ping, _nodeId); return false; } else if (determinedState == Status.Disconnected) { - logger.warn("Agent is disconnected but the host is still up: {}-{}", host.getId(), host.getName() + - '-' + host.getResourceState()); + logger.warn("Agent is disconnected but the host is still up: {} state: {}", host, host.getResourceState()); if (currentStatus == Status.Disconnected || (currentStatus == Status.Up && host.getResourceState() == ResourceState.PrepareForMaintenance)) { if ((System.currentTimeMillis() >> 10) - host.getLastPinged() > 
AlertWait.value()) { - logger.warn("Host {} has been disconnected past the wait time it should be disconnected.", host.getId()); + logger.warn("Host {} has been disconnected past the wait time it should be disconnected.", host); event = Status.Event.WaitedTooLong; } else { - logger.debug("Host {} has been determined to be disconnected but it hasn't passed the wait time yet.", host.getId()); + logger.debug("Host {} has been determined to be disconnected but it hasn't passed the wait time yet.", host); return false; } } else if (currentStatus == Status.Up) { final DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId()); final HostPodVO podVO = _podDao.findById(host.getPodId()); - final String hostDesc = "name: " + host.getName() + " (id:" + host.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName(); + final String hostDesc = "name: " + host.getName() + " (id:" + host.getUuid() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName(); if (host.getType() != Host.Type.SecondaryStorage && host.getType() != Host.Type.ConsoleProxy) { _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "Host disconnected, " + hostDesc, "If the agent for host [" + hostDesc + "] is not restarted within " + AlertWait + " seconds, host will go to Alert state"); @@ -907,12 +914,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl final DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId()); final HostPodVO podVO = _podDao.findById(host.getPodId()); final String podName = podVO != null ? 
podVO.getName() : "NO POD"; - final String hostDesc = "name: " + host.getName() + " (id:" + host.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podName; - _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "Host in ALERT state, " + hostDesc, - "In availability zone " + host.getDataCenterId() + ", host is in alert state: " + host.getId() + "-" + host.getName()); + final String hostDesc = String.format("%s, availability zone: %s, pod: %s", host, dcVO, podName); + _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, + host.getDataCenterId(), host.getPodId(), + String.format("Host in ALERT state, %s", hostDesc), + String.format("In availability zone %s, host is in alert state: %s", dcVO, host)); } } else { - logger.debug("The next status of agent {} is not Alert, no need to investigate what happened", host.getId()); + logger.debug("The next status of agent {} is not Alert, no need to investigate what happened", host); } } handleDisconnectWithoutInvestigation(attache, event, true, true); @@ -958,7 +967,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } final Status status = h.getStatus(); if (!status.equals(Status.Up) && !status.equals(Status.Connecting)) { - logger.debug("Can not send command {} due to Host {} not being up", cmd, hostId); + logger.debug("Can not send command {} due to Host {} not being up", cmd, h); return null; } final Answer answer = send(hostId, cmd); @@ -1004,21 +1013,26 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } if (host.getRemoved() != null) { - throw new CloudRuntimeException("Host has already been removed: " + hostId); + throw new CloudRuntimeException(String.format( + "Host has already been removed: %s", host)); } if (host.getStatus() == Status.Disconnected) { - logger.debug("Host is already disconnected, no work to be done: {}", hostId); + logger.debug("Host is already disconnected, no 
work to be done: {}", host); return; } if (host.getStatus() != Status.Up && host.getStatus() != Status.Alert && host.getStatus() != Status.Rebalancing) { - throw new CloudRuntimeException("Unable to disconnect host because it is not in the correct state: host=" + hostId + "; Status=" + host.getStatus()); + throw new CloudRuntimeException(String.format( + "Unable to disconnect host because it is not in the correct state: host=%s; Status=%s", + host, host.getStatus())); } AgentAttache attache = findAttache(hostId); if (attache == null) { - throw new CloudRuntimeException("Unable to disconnect host because it is not connected to this server: " + hostId); + throw new CloudRuntimeException(String.format( + "Unable to disconnect host because it is not connected to this server: %s", + host)); } disconnectWithoutInvestigation(attache, Event.ShutdownRequested); } @@ -1043,9 +1057,9 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException { if (event == Event.AgentDisconnected) { - logger.debug("Received agent disconnect event for host {}", hostId); AgentAttache attache = null; attache = findAttache(hostId); + logger.debug("Received agent disconnect event for host {} ({})", hostId, attache); if (attache != null) { handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); } @@ -1055,7 +1069,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl try { reconnect(hostId); } catch (CloudRuntimeException e) { - logger.debug("Error on shutdown request for hostID: {}", hostId, e); + logger.debug("Error on shutdown request for hostID: {} ({})", hostId, findAttache(hostId), e); return false; } return true; @@ -1070,8 +1084,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) throws 
ConnectionException { - logger.debug("create ConnectedAgentAttache for {}", host.getId()); - final AgentAttache attache = new ConnectedAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates()); + logger.debug("create ConnectedAgentAttache for {}", host); + final AgentAttache attache = new ConnectedAgentAttache(this, host.getId(), host.getUuid(), host.getName(), link, host.isInMaintenanceStates()); link.attach(attache); AgentAttache old = null; @@ -1118,7 +1132,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl joinLock.unlock(); } } else { - throw new ConnectionException(true, "Unable to acquire lock on host " + host.getUuid()); + throw new ConnectionException(true, + String.format("Unable to acquire lock on host %s", host)); } joinLock.releaseRef(); return attache; @@ -1131,7 +1146,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl final HostVO host = _resourceMgr.createHostVOForConnectedAgent(startup); if (host != null) { checkHostArchOnCluster(host); - ready = new ReadyCommand(host.getDataCenterId(), host.getId(), NumbersUtil.enableHumanReadableSizes); + ready = new ReadyCommand(host, NumbersUtil.enableHumanReadableSizes); attache = sendReadyAndGetAttache(host, ready, link, startup); } } catch (final Exception e) { @@ -1171,8 +1186,10 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl ServerResource resource; Map details; long id; + String uuid; + String name; - public SimulateStartTask(final long id, final ServerResource resource, final Map details) { + public SimulateStartTask(final long id, String uuid, String name, final ServerResource resource, final Map details) { this.uuid = uuid; this.name = name; this.id = id; this.resource = resource; this.details = details; @@ -1181,26 +1198,26 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override protected void runInContext() { try { - logger.debug("Simulating start for resource {} id 
{}", resource.getName(), id); + logger.debug("Simulating start for resource {} (id: {}, uuid: {}, name: {})", resource.getName(), id, uuid, name); if (tapLoadingAgents(id, TapAgentsAction.Add)) { try { final AgentAttache agentattache = findAttache(id); if (agentattache == null) { - logger.debug("Creating agent for host {}", id); + logger.debug("Creating agent for host [id: {}, uuid: {}, name: {}]", id, uuid, name); _resourceMgr.createHostAndAgent(id, resource, details, false, null, false); - logger.debug("Completed creating agent for host {}", id); + logger.debug("Completed creating agent for host [id: {}, uuid: {}, name: {}]", id, uuid, name); } else { - logger.debug("Agent already created in another thread for host {}, ignore this", id); + logger.debug("Agent already created in another thread for host [id: {}, uuid: {}, name: {}], ignore this", id, uuid, name); } } finally { tapLoadingAgents(id, TapAgentsAction.Del); } } else { - logger.debug("Agent creation already getting processed in another thread for host {}, ignore this", id); + logger.debug("Agent creation already getting processed in another thread for host [id: {}, uuid: {}, name: {}], ignore this", id, uuid, name); } } catch (final Exception e) { - logger.warn("Unable to simulate start on resource {} name {}", id, resource.getName(), e); + logger.warn("Unable to simulate start on resource [id: {}, uuid: {}, name: {}] name {}", id, uuid, name, resource.getName(), e); } } } @@ -1240,7 +1257,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl cmd = cmds[i]; if (cmd instanceof StartupRoutingCommand || cmd instanceof StartupProxyCommand || cmd instanceof StartupSecondaryStorageCommand || cmd instanceof StartupStorageCommand) { - answers[i] = new StartupAnswer((StartupCommand) cmds[i], 0, mgmtServiceConf.getPingInterval()); + answers[i] = new StartupAnswer((StartupCommand) cmds[i], 0, "", "", mgmtServiceConf.getPingInterval()); break; } } @@ -1270,7 +1287,7 @@ public class 
AgentManagerImpl extends ManagerBase implements AgentManager, Handl } if (!BooleanUtils.toBoolean(EnableKVMAutoEnableDisable.valueIn(host.getClusterId()))) { logger.debug("{} is disabled for the cluster {}, cannot process the health check result " + - "received for the host {}", EnableKVMAutoEnableDisable.key(), host.getClusterId(), host.getName()); + "received for the host {}", EnableKVMAutoEnableDisable.key(), host.getClusterId(), host); return; } @@ -1280,10 +1297,10 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl logger.info("Host health check {}, auto {} KVM host: {}", hostHealthCheckResult ? "succeeds" : "fails", hostHealthCheckResult ? "enabling" : "disabling", - host.getName()); + host); _resourceMgr.autoUpdateHostAllocationState(hostId, resourceEvent); } catch (NoTransitionException e) { - logger.error("Cannot Auto {} host: {}", resourceEvent, host.getName(), e); + logger.error("Cannot Auto {} host: {}", resourceEvent, host, e); } } @@ -1330,11 +1347,11 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl if (logger.isDebugEnabled()) { if (cmd instanceof PingRoutingCommand) { logD = false; - logger.debug("Ping from Routing host {}({})", hostId, hostName); + logger.debug("Ping from Routing host {}", attache); logger.trace("SeqA {}-{}: Processing {}", hostId, request.getSequence(), request); } else if (cmd instanceof PingCommand) { logD = false; - logger.debug("Ping from {}({})", hostId, hostName); + logger.debug("Ping from {}", attache); logger.trace("SeqA {}-{}: Processing {}", hostId, request.getSequence(), request); } else { logger.debug("SeqA {}-{}: {}", hostId, request.getSequence(), request); @@ -1349,20 +1366,20 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl if (cmd instanceof StartupRoutingCommand) { final StartupRoutingCommand startup = (StartupRoutingCommand) cmd; processStartupRoutingCommand(startup, hostId); - answer = new 
StartupAnswer(startup, attache.getId(), mgmtServiceConf.getPingInterval()); + answer = new StartupAnswer(startup, attache.getId(), attache.getUuid(), attache.getName(), mgmtServiceConf.getPingInterval()); } else if (cmd instanceof StartupProxyCommand) { final StartupProxyCommand startup = (StartupProxyCommand) cmd; - answer = new StartupAnswer(startup, attache.getId(), mgmtServiceConf.getPingInterval()); + answer = new StartupAnswer(startup, attache.getId(), attache.getUuid(), attache.getName(), mgmtServiceConf.getPingInterval()); } else if (cmd instanceof StartupSecondaryStorageCommand) { final StartupSecondaryStorageCommand startup = (StartupSecondaryStorageCommand) cmd; - answer = new StartupAnswer(startup, attache.getId(), mgmtServiceConf.getPingInterval()); + answer = new StartupAnswer(startup, attache.getId(), attache.getUuid(), attache.getName(), mgmtServiceConf.getPingInterval()); } else if (cmd instanceof StartupStorageCommand) { final StartupStorageCommand startup = (StartupStorageCommand) cmd; - answer = new StartupAnswer(startup, attache.getId(), mgmtServiceConf.getPingInterval()); + answer = new StartupAnswer(startup, attache.getId(), attache.getUuid(), attache.getName(), mgmtServiceConf.getPingInterval()); } else if (cmd instanceof ShutdownCommand) { final ShutdownCommand shutdown = (ShutdownCommand)cmd; final String reason = shutdown.getReason(); - logger.info("Host {} has informed us that it is shutting down with reason {} and detail {}", attache.getId(), reason, shutdown.getDetail()); + logger.info("Host {} has informed us that it is shutting down with reason {} and detail {}", attache, reason, shutdown.getDetail()); if (reason.equals(ShutdownCommand.Update)) { // disconnectWithoutInvestigation(attache, Event.UpdateNeeded); throw new CloudRuntimeException("Agent update not implemented"); @@ -1392,7 +1409,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl // gateway (cannot ping the default route) final 
DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId()); final HostPodVO podVO = _podDao.findById(host.getPodId()); - final String hostDesc = "name: " + host.getName() + " (id:" + host.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName(); + final String hostDesc = String.format("%s, availability zone: %s, pod: %s", host, dcVO, podVO); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_ROUTING, host.getDataCenterId(), host.getPodId(), "Host lost connection to gateway, " + hostDesc, "Host [" + hostDesc + "] lost connection to gateway (default route) and is possibly having network connection issues."); @@ -1410,7 +1427,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } else if (cmd instanceof ReadyAnswer) { final HostVO host = _hostDao.findById(attache.getId()); if (host == null) { - logger.debug("Cant not find host {}", attache.getId()); + logger.debug("Cannot find host with id: {} ({})", attache.getId(), attache); } answer = new Answer(cmd); } else { @@ -1442,7 +1459,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl if (attache == null) { logger.warn("Unable to process: {}", response); } else if (!attache.processAnswers(response.getSequence(), response)) { - logger.info("Host {} - Seq {}: Response is not processed: {}", attache.getId(), response.getSequence(), response); + logger.info("Host {} - Seq {}: Response is not processed: {}", attache, response.getSequence(), response); } } @@ -1512,14 +1529,16 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl public boolean agentStatusTransitTo(final HostVO host, final Status.Event e, final long msId) { try { _agentStatusLock.lock(); - logger.debug("[Resource state = {}, Agent event = , Host id = {}, name = {}]", host.getResourceState(), e.toString(), host.getId(), host.getName()); + logger.debug("[Resource state = {}, Agent event = {}, Host = {}]", + host.getResourceState(), 
e.toString(), host); host.setManagementServerId(msId); try { return _statusStateMachine.transitTo(host, e, host.getId(), _hostDao); } catch (final NoTransitionException e1) { - logger.debug("Cannot transit agent status with event {} for host {}, name={}, management server id is {}", e, host.getId(), host.getName(), msId); - throw new CloudRuntimeException("Cannot transit agent status with event " + e + " for host " + host.getId() + ", management server id is " + msId + "," + e1.getMessage()); + logger.debug("Cannot transit agent status with event {} for host {}, management server id is {}", e, host, msId); + throw new CloudRuntimeException(String.format( + "Cannot transit agent status with event %s for host %s, management server id is %d, %s", e, host, msId, e1.getMessage())); } } finally { _agentStatusLock.unlock(); @@ -1600,7 +1619,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl attache = createAttacheForDirectConnect(host, resource); final StartupAnswer[] answers = new StartupAnswer[cmds.length]; for (int i = 0; i < answers.length; i++) { - answers[i] = new StartupAnswer(cmds[i], attache.getId(), mgmtServiceConf.getPingInterval()); + answers[i] = new StartupAnswer(cmds[i], attache.getId(), attache.getUuid(), attache.getName(), mgmtServiceConf.getPingInterval()); } attache.process(answers); @@ -1650,7 +1669,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl public void pingBy(final long agentId) { // Update PingMap with the latest time if agent entry exists in the PingMap if (_pingMap.replace(agentId, InaccurateClock.getTimeInSeconds()) == null) { - logger.info("PingMap for agent: " + agentId + " will not be updated because agent is no longer in the PingMap"); + logger.info("PingMap for agent: {} ({}) will not be updated because agent is no longer in the PingMap", agentId, findAttache(agentId)); } } @@ -1671,17 +1690,17 @@ public class AgentManagerImpl extends ManagerBase implements 
AgentManager, Handl /* * Host is in non-operation state, so no investigation and direct put agent to Disconnected */ - logger.debug("Ping timeout but agent {} is in resource state of {}, so no investigation", agentId, resourceState); + logger.debug("Ping timeout but agent {} is in resource state of {}, so no investigation", h, resourceState); disconnectWithoutInvestigation(agentId, Event.ShutdownRequested); } else { final HostVO host = _hostDao.findById(agentId); if (host != null && (host.getType() == Host.Type.ConsoleProxy || host.getType() == Host.Type.SecondaryStorageVM || host.getType() == Host.Type.SecondaryStorageCmdExecutor)) { - logger.warn("Disconnect agent for CPVM/SSVM due to physical connection close. host: {}", host.getId()); + logger.warn("Disconnect agent for CPVM/SSVM due to physical connection close. host: {}", host); disconnectWithoutInvestigation(agentId, Event.ShutdownRequested); } else { - logger.debug("Ping timeout for agent {}, do investigation", agentId); + logger.debug("Ping timeout for agent {}, do investigation", h); disconnectWithInvestigation(agentId, Event.PingTimeout); } } @@ -1844,7 +1863,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl Commands c = new Commands(cmds); send(host.getId(), c, this); } catch (AgentUnavailableException e) { - logger.debug("Failed to send host params on host: " + host.getId()); + logger.debug("Failed to send host params on host: {}", host); } } } @@ -1903,7 +1922,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl for (Long hostId : hostIds) { Answer answer = easySend(hostId, cmds); if (answer == null || !answer.getResult()) { - logger.error("Error sending parameters to agent {}", hostId); + logger.error("Error sending parameters to agent {} ({})", hostId, findAttache(hostId)); } } } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java 
b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java index 285ba4ffe60..e36b145c8bc 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java @@ -44,14 +44,14 @@ public class ClusteredAgentAttache extends ConnectedAgentAttache implements Rout s_clusteredAgentMgr = agentMgr; } - public ClusteredAgentAttache(final AgentManagerImpl agentMgr, final long id, final String name) { - super(agentMgr, id, name, null, false); + public ClusteredAgentAttache(final AgentManagerImpl agentMgr, final long id, final String uuid, final String name) { + super(agentMgr, id, uuid, name, null, false); _forward = true; _transferRequests = new LinkedList(); } - public ClusteredAgentAttache(final AgentManagerImpl agentMgr, final long id, final String name, final Link link, final boolean maintenance) { - super(agentMgr, id, name, link, maintenance); + public ClusteredAgentAttache(final AgentManagerImpl agentMgr, final long id, final String uuid, final String name, final Link link, final boolean maintenance) { + super(agentMgr, id, uuid, name, link, maintenance); _forward = link == null; _transferRequests = new LinkedList(); } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java index a7fea0f2533..be327418205 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java @@ -216,10 +216,11 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } } - logger.debug("Loading directly connected host {}({})", host.getId(), host.getName()); + logger.debug("Loading directly connected host {}", host); loadDirectlyConnectedHost(host, 
false); } catch (final Throwable e) { - logger.warn(" can not load directly connected host {}({}) due to ", host.getId(), host.getName(), e); + logger.warn(" can not load directly connected host {} due to ", + host, e); } } } @@ -243,10 +244,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust return new ClusteredAgentHandler(type, link, data); } - protected AgentAttache createAttache(final long id) { - logger.debug("create forwarding ClusteredAgentAttache for {}", id); - final HostVO host = _hostDao.findById(id); - final AgentAttache attache = new ClusteredAgentAttache(this, id, host.getName()); + protected AgentAttache createAttache(final HostVO host) { + logger.debug("create forwarding ClusteredAgentAttache for {}", host); + long id = host.getId(); + final AgentAttache attache = new ClusteredAgentAttache(this, id, host.getUuid(), host.getName()); AgentAttache old = null; synchronized (_agents) { old = _agents.get(id); @@ -261,8 +262,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) { - logger.debug("create ClusteredAgentAttache for {}", host.getId()); - final AgentAttache attache = new ClusteredAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates()); + logger.debug("create ClusteredAgentAttache for {}", host); + final AgentAttache attache = new ClusteredAgentAttache(this, host.getId(), host.getUuid(), host.getName(), link, host.isInMaintenanceStates()); link.attach(attache); AgentAttache old = null; synchronized (_agents) { @@ -278,7 +279,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) { logger.debug("Create ClusteredDirectAgentAttache for {}.", host); - final DirectAgentAttache attache = new 
ClusteredDirectAgentAttache(this, host.getId(), host.getName(), _nodeId, resource, host.isInMaintenanceStates()); + final DirectAgentAttache attache = new ClusteredDirectAgentAttache(this, host.getId(), host.getUuid(), host.getName(), _nodeId, resource, host.isInMaintenanceStates()); AgentAttache old = null; synchronized (_agents) { old = _agents.get(host.getId()); @@ -321,15 +322,17 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException { if (event == Event.AgentDisconnected) { - logger.debug("Received agent disconnect event for host {}", hostId); final AgentAttache attache = findAttache(hostId); + logger.debug("Received agent disconnect event for host {} ({})", hostId, attache); if (attache != null) { // don't process disconnect if the host is being rebalanced if (isAgentRebalanceEnabled()) { final HostTransferMapVO transferVO = _hostTransferDao.findById(hostId); if (transferVO != null) { if (transferVO.getFutureOwner() == _nodeId && transferVO.getState() == HostTransferState.TransferStarted) { - logger.debug("Not processing {} event for the host id={} as the host is being connected to {}",Event.AgentDisconnected, hostId, _nodeId); + logger.debug( + "Not processing {} event for the host [id: {}, uuid: {}, name: {}] as the host is being connected to {}", + Event.AgentDisconnected, hostId, attache.getUuid(), attache.getName(), _nodeId); return true; } } @@ -338,7 +341,9 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust // don't process disconnect if the disconnect came for the host via delayed cluster notification, // but the host has already reconnected to the current management server if (!attache.forForward()) { - logger.debug("Not processing {} event for the host id={} as the host is directly connected to the current management server {}", Event.AgentDisconnected, hostId, _nodeId); + 
logger.debug( + "Not processing {} event for the host [id: {}, uuid: {}, name: {}] as the host is directly connected to the current management server {}", + Event.AgentDisconnected, hostId, attache.getUuid(), attache.getName(), _nodeId); return true; } @@ -545,8 +550,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust AgentAttache agent = findAttache(hostId); if (agent == null || !agent.forForward()) { if (isHostOwnerSwitched(host)) { - logger.debug("Host {} has switched to another management server, need to update agent map with a forwarding agent attache", hostId); - agent = createAttache(hostId); + logger.debug("Host {} has switched to another management server, need to update agent map with a forwarding agent attache", host); + agent = createAttache(host); } } if (agent == null) { @@ -712,12 +717,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override public void onManagementNodeLeft(final List nodeList, final long selfNodeId) { for (final ManagementServerHost vo : nodeList) { - logger.info("Marking hosts as disconnected on Management server {}", vo.getMsid()); + logger.info("Marking hosts as disconnected on Management server {}", vo); final long lastPing = (System.currentTimeMillis() >> 10) - mgmtServiceConf.getTimeout(); _hostDao.markHostsAsDisconnected(vo.getMsid(), lastPing); outOfBandManagementDao.expireServerOwnership(vo.getMsid()); haConfigDao.expireServerOwnership(vo.getMsid()); - logger.info("Deleting entries from op_host_transfer table for Management server {}", vo.getMsid()); + logger.info("Deleting entries from op_host_transfer table for Management server {}", vo); cleanupTransferMap(vo.getMsid()); } } @@ -744,7 +749,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust try { result = rebalanceHost(agentId, currentOwnerId, futureOwnerId); } catch (final Exception e) { - logger.warn("Unable to rebalance host id={}", agentId, e); + 
logger.warn("Unable to rebalance host id={} ({})", agentId, findAttache(agentId), e); } } return result; @@ -814,22 +819,24 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust List hostsToRebalance = new ArrayList(); for (final AgentLoadBalancerPlanner lbPlanner : _lbPlanners) { - hostsToRebalance = lbPlanner.getHostsToRebalance(node.getMsid(), avLoad); + hostsToRebalance = lbPlanner.getHostsToRebalance(node, avLoad); if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) { break; } - logger.debug("Agent load balancer planner " + lbPlanner.getName() + " found no hosts to be rebalanced from management server " + node.getMsid()); + logger.debug( + "Agent load balancer planner {} found no hosts to be rebalanced from management server {}", + lbPlanner.getName(), node); } if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) { - logger.debug("Found {} hosts to rebalance from management server {}", hostsToRebalance.size(), node.getMsid()); + logger.debug("Found {} hosts to rebalance from management server {}", hostsToRebalance.size(), node); for (final HostVO host : hostsToRebalance) { final long hostId = host.getId(); - logger.debug("Asking management server {} to give away host id={}", node.getMsid(), hostId); + logger.debug("Asking management server {} to give away host id={}", node, host); boolean result = true; if (_hostTransferDao.findById(hostId) != null) { - logger.warn("Somebody else is already rebalancing host id: {}", hostId); + logger.warn("Somebody else is already rebalancing host: {}", host); continue; } @@ -838,11 +845,11 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust transfer = _hostTransferDao.startAgentTransfering(hostId, node.getMsid(), _nodeId); final Answer[] answer = sendRebalanceCommand(node.getMsid(), hostId, node.getMsid(), _nodeId, Event.RequestAgentRebalance); if (answer == null) { - logger.warn("Failed to get host id={} from management server {}", hostId, 
node.getMsid()); + logger.warn("Failed to get host {} from management server {}", host, node); result = false; } } catch (final Exception ex) { - logger.warn("Failed to get host id={} from management server {}", hostId, node.getMsid(), ex); + logger.warn("Failed to get host {} from management server {}", host, node, ex); result = false; } finally { if (transfer != null) { @@ -857,7 +864,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } } } else { - logger.debug("Found no hosts to rebalance from the management server {}", node.getMsid()); + logger.debug("Found no hosts to rebalance from the management server {}", node); } } } @@ -902,7 +909,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust return null; } - logger.debug("Propagating agent change request event: {} to agent: {}", event.toString(), agentId); + logger.debug("Propagating agent change request event: {} to agent: {} ({})", event.toString(), agentId, findAttache(agentId)); final Command[] cmds = new Command[1]; cmds[0] = new ChangeAgentCommand(agentId, event); @@ -942,14 +949,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust final HostTransferMapVO transferMap = _hostTransferDao.findActiveHostTransferMapByHostId(hostId, new Date(cutTime.getTime() - rebalanceTimeOut)); if (transferMap == null) { - logger.debug("Timed out waiting for the host id={} to be ready to transfer, skipping rebalance for the host" + hostId); + logger.debug("Timed out waiting for the host id={} ({}) to be ready to transfer, skipping rebalance for the host", hostId, attache); iterator.remove(); _hostTransferDao.completeAgentTransfer(hostId); continue; } if (transferMap.getInitialOwner() != _nodeId || attache == null || attache.forForward()) { - logger.debug("Management server {} doesn't own host id={} any more, skipping rebalance for the host", _nodeId, hostId); + logger.debug(String.format("Management server %d doesn't own host 
id=%d (%s) any more, skipping rebalance for the host", _nodeId, hostId, attache)); iterator.remove(); _hostTransferDao.completeAgentTransfer(hostId); continue; @@ -957,7 +964,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust final ManagementServerHostVO ms = _mshostDao.findByMsid(transferMap.getFutureOwner()); if (ms != null && ms.getState() != ManagementServerHost.State.Up) { - logger.debug("Can't transfer host {} as it's future owner is not in UP state: {}, skipping rebalance for the host", hostId, ms); + logger.debug("Can't transfer host {} ({}) as it's future owner is not in UP state: {}, skipping rebalance for the host", hostId, attache, ms); iterator.remove(); _hostTransferDao.completeAgentTransfer(hostId); continue; @@ -968,13 +975,13 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust try { _executor.execute(new RebalanceTask(hostId, transferMap.getInitialOwner(), transferMap.getFutureOwner())); } catch (final RejectedExecutionException ex) { - logger.warn("Failed to submit rebalance task for host id={}; postponing the execution", hostId); + logger.warn("Failed to submit rebalance task for host id={} ({}); postponing the execution", hostId, attache); continue; } } else { - logger.debug("Agent {} can't be transferred yet as its request queue size is {} and listener queue size is {}", - hostId, attache.getQueueSize(), attache.getNonRecurringListenersSize()); + logger.debug("Agent {} ({}) can't be transferred yet as its request queue size is {} and listener queue size is {}", + hostId, attache, attache.getQueueSize(), attache.getNonRecurringListenersSize()); } } } else { @@ -990,7 +997,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } private boolean setToWaitForRebalance(final long hostId, final long currentOwnerId, final long futureOwnerId) { - logger.debug("Adding agent {} to the list of agents to transfer", hostId); + logger.debug("Adding agent {} ({}) 
to the list of agents to transfer", hostId, findAttache(hostId)); synchronized (_agentToTransferIds) { return _agentToTransferIds.add(hostId); } @@ -1012,7 +1019,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } } catch (final Exception ex) { - logger.warn("Host {} failed to connect to the management server {} as a part of rebalance process", hostId, futureOwnerId, ex); + logger.warn("Host {} ({}) failed to connect to the management server {} as a part of rebalance process", hostId, findAttache(hostId), futureOwnerId, ex); result = false; } @@ -1027,7 +1034,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } else if (futureOwnerId == _nodeId) { final HostVO host = _hostDao.findById(hostId); try { - logger.debug("Disconnecting host {}({}) as a part of rebalance process without notification", host.getId(), host.getName()); + logger.debug("Disconnecting host {} as a part of rebalance process without notification", host); final AgentAttache attache = findAttache(hostId); if (attache != null) { @@ -1035,21 +1042,21 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } if (result) { - logger.debug("Loading directly connected host {}({}) to the management server {} as a part of rebalance process", host.getId(), host.getName(), _nodeId); + logger.debug("Loading directly connected host {} to the management server {} as a part of rebalance process", host, _nodeId); result = loadDirectlyConnectedHost(host, true); } else { - logger.warn("Failed to disconnect {}({}) as a part of rebalance process without notification" + host.getId(), host.getName()); + logger.warn("Failed to disconnect {} as a part of rebalance process without notification", host); } } catch (final Exception ex) { - logger.warn("Failed to load directly connected host {}({}) to the management server {} a part of rebalance process without notification", host.getId(), host.getName(), _nodeId, ex); + 
logger.warn("Failed to load directly connected host {} to the management server {} a part of rebalance process without notification", host, _nodeId, ex); result = false; } if (result) { - logger.debug("Successfully loaded directly connected host {}({}) to the management server {} a part of rebalance process without notification", host.getId(), host.getName(), _nodeId); + logger.debug("Successfully loaded directly connected host {} to the management server {} a part of rebalance process without notification", host, _nodeId); } else { - logger.warn("Failed to load directly connected host {}({}) to the management server {} a part of rebalance process without notification", host.getId(), host.getName(), _nodeId); + logger.warn("Failed to load directly connected host {} to the management server {} a part of rebalance process without notification", host, _nodeId); } } @@ -1059,9 +1066,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust protected void finishRebalance(final long hostId, final long futureOwnerId, final Event event) { final boolean success = event == Event.RebalanceCompleted ? 
true : false; - logger.debug("Finishing rebalancing for the agent {} with event {}", hostId, event); final AgentAttache attache = findAttache(hostId); + logger.debug("Finishing rebalancing for the agent {} ({}) with event {}", hostId, attache, event); + if (attache == null || !(attache instanceof ClusteredAgentAttache)) { logger.debug("Unable to find forward attache for the host id={} assuming that the agent disconnected already", hostId); _hostTransferDao.completeAgentTransfer(hostId); @@ -1078,7 +1086,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust // 2) Get all transfer requests and route them to peer Request requestToTransfer = forwardAttache.getRequestToTransfer(); while (requestToTransfer != null) { - logger.debug("Forwarding request {} held in transfer attache {} from the management server {} to {}", requestToTransfer.getSequence(), hostId, _nodeId, futureOwnerId); + logger.debug("Forwarding request {} held in transfer attache [id: {}, uuid: {}, name: {}] from the management server {} to {}", + requestToTransfer.getSequence(), hostId, attache.getUuid(), attache.getName(), _nodeId, futureOwnerId); final boolean routeResult = routeToPeer(Long.toString(futureOwnerId), requestToTransfer.getBytes()); if (!routeResult) { logD(requestToTransfer.getBytes(), "Failed to route request to peer"); @@ -1087,23 +1096,25 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust requestToTransfer = forwardAttache.getRequestToTransfer(); } - logger.debug("Management server {} completed agent {} rebalance to {}", _nodeId, hostId, futureOwnerId); + logger.debug("Management server {} completed agent [id: {}, uuid: {}, name: {}] rebalance to {}", + _nodeId, hostId, attache.getUuid(), attache.getName(), futureOwnerId); } else { failRebalance(hostId); } - logger.debug("Management server {} completed agent {} rebalance", _nodeId, hostId); + logger.debug("Management server {} completed agent [id: {}, uuid: {}, name: {}] 
rebalance", _nodeId, hostId, attache.getUuid(), attache.getName()); _hostTransferDao.completeAgentTransfer(hostId); } protected void failRebalance(final long hostId) { + AgentAttache attache = findAttache(hostId); try { - logger.debug("Management server {} failed to rebalance agent {}", _nodeId, hostId); + logger.debug("Management server {} failed to rebalance agent {} ({})", _nodeId, hostId, attache); _hostTransferDao.completeAgentTransfer(hostId); handleDisconnectWithoutInvestigation(findAttache(hostId), Event.RebalanceFailed, true, true); } catch (final Exception ex) { - logger.warn("Failed to reconnect host id={} as a part of failed rebalance task cleanup", hostId); + logger.warn("Failed to reconnect host id={} ({}) as a part of failed rebalance task cleanup", hostId, attache); } } @@ -1119,20 +1130,20 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust final ClusteredDirectAgentAttache attache = (ClusteredDirectAgentAttache)_agents.get(hostId); if (attache != null && attache.getQueueSize() == 0 && attache.getNonRecurringListenersSize() == 0) { handleDisconnectWithoutInvestigation(attache, Event.StartAgentRebalance, true, true); - final ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)createAttache(hostId); + final ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)createAttache(host); if (forwardAttache == null) { - logger.warn("Unable to create a forward attache for the host {} as a part of rebalance process", hostId); + logger.warn("Unable to create a forward attache for the host {} as a part of rebalance process", host); return false; } - logger.debug("Putting agent id={} to transfer mode", hostId); + logger.debug("Putting agent {} to transfer mode", host); forwardAttache.setTransferMode(true); _agents.put(hostId, forwardAttache); } else { if (attache == null) { - logger.warn("Attache for the agent {} no longer exists on management server, can't start host rebalancing", hostId, _nodeId); + 
logger.warn("Attache for the agent {} no longer exists on management server {}, can't start host rebalancing", host, _nodeId); } else { - logger.warn("Attache for the agent {} has request queue size= {} and listener queue size {}, can't start host rebalancing", - hostId, attache.getQueueSize(), attache.getNonRecurringListenersSize()); + logger.warn("Attache for the agent {} has request queue size {} and listener queue size {}, can't start host rebalancing", + host, attache.getQueueSize(), attache.getNonRecurringListenersSize()); } return false; } @@ -1167,11 +1178,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override protected void runInContext() { + AgentAttache attache = findAttache(hostId); try { - logger.debug("Rebalancing host id={}", hostId); + logger.debug("Rebalancing host id={} ({})", hostId, attache); rebalanceHost(hostId, currentOwnerId, futureOwnerId); } catch (final Exception e) { - logger.warn("Unable to rebalance host id={}", hostId, e); + logger.warn("Unable to rebalance host id={} ({})", hostId, attache, e); } } } @@ -1260,7 +1272,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } else if (cmds.length == 1 && cmds[0] instanceof PropagateResourceEventCommand) { final PropagateResourceEventCommand cmd = (PropagateResourceEventCommand)cmds[0]; - logger.debug("Intercepting command to propagate event {} for host {}", cmd.getEvent().name(), cmd.getHostId()); + logger.debug("Intercepting command to propagate event {} for host {} ({})", () -> cmd.getEvent().name(), cmd::getHostId, () -> _hostDao.findById(cmd.getHostId())); boolean result = false; try { diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredDirectAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredDirectAgentAttache.java index ac1076a9ff0..e36ea6cedc1 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredDirectAgentAttache.java
+++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredDirectAgentAttache.java @@ -26,8 +26,8 @@ import com.cloud.utils.exception.CloudRuntimeException; public class ClusteredDirectAgentAttache extends DirectAgentAttache implements Routable { private final long _nodeId; - public ClusteredDirectAgentAttache(ClusteredAgentManagerImpl agentMgr, long id, String name, long mgmtId, ServerResource resource, boolean maintenance) { - super(agentMgr, id, name, resource, maintenance); + public ClusteredDirectAgentAttache(ClusteredAgentManagerImpl agentMgr, long id, String uuid, String name, long mgmtId, ServerResource resource, boolean maintenance) { + super(agentMgr, id, uuid, name, resource, maintenance); _nodeId = mgmtId; } @@ -37,9 +37,9 @@ public class ClusteredDirectAgentAttache extends DirectAgentAttache implements R try { req = Request.parse(data); } catch (ClassNotFoundException e) { - throw new CloudRuntimeException("Unable to rout to an agent ", e); + throw new CloudRuntimeException("Unable to route to an agent ", e); } catch (UnsupportedVersionException e) { - throw new CloudRuntimeException("Unable to rout to an agent ", e); + throw new CloudRuntimeException("Unable to route to an agent ", e); } if (req instanceof Response) { diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java index c8e24301b29..523f98fd010 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java @@ -31,8 +31,8 @@ public class ConnectedAgentAttache extends AgentAttache { protected Link _link; - public ConnectedAgentAttache(final AgentManagerImpl agentMgr, final long id, final String name, final Link link, final boolean maintenance) { - super(agentMgr, id, name, maintenance); + public ConnectedAgentAttache(final 
AgentManagerImpl agentMgr, final long id, final String uuid, final String name, final Link link, final boolean maintenance) { + super(agentMgr, id, uuid, name, maintenance); _link = link; } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java index 927da34104f..07d5bf80393 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java @@ -51,8 +51,8 @@ public class DirectAgentAttache extends AgentAttache { AtomicInteger _outstandingTaskCount; AtomicInteger _outstandingCronTaskCount; - public DirectAgentAttache(AgentManagerImpl agentMgr, long id, String name, ServerResource resource, boolean maintenance) { - super(agentMgr, id, name, resource, maintenance); + public DirectAgentAttache(AgentManagerImpl agentMgr, long id, String uuid, String name, ServerResource resource, boolean maintenance) { + super(agentMgr, id, uuid, name, resource, maintenance); _resource = resource; _outstandingTaskCount = new AtomicInteger(0); _outstandingCronTaskCount = new AtomicInteger(0); @@ -60,7 +60,7 @@ public class DirectAgentAttache extends AgentAttache { @Override public void disconnect(Status state) { - logger.debug("Processing disconnect {}({})", _id, _name); + logger.debug("Processing disconnect [id: {}, uuid: {}, name: {}]", _id, _uuid, _name); for (ScheduledFuture future : _futures) { future.cancel(false); @@ -115,7 +115,7 @@ public class DirectAgentAttache extends AgentAttache { if (answers != null && answers[0] instanceof StartupAnswer) { StartupAnswer startup = (StartupAnswer)answers[0]; int interval = startup.getPingInterval(); - logger.info("StartupAnswer received {} Interval = {}", startup.getHostId(), interval); + logger.info("StartupAnswer received [id: {}, uuid: {}, name: {}, interval: {}]", startup.getHostId(), startup.getHostUuid(),
startup.getHostName(), interval); _futures.add(_agentMgr.getCronJobPool().scheduleAtFixedRate(new PingTask(), interval, interval, TimeUnit.SECONDS)); } } @@ -126,7 +126,7 @@ public class DirectAgentAttache extends AgentAttache { assert _resource == null : "Come on now....If you're going to dabble in agent code, you better know how to close out our resources. Ever considered why there's a method called disconnect()?"; synchronized (this) { if (_resource != null) { - logger.warn("Lost attache for {}({})", _id, _name); + logger.warn("Lost attache for [id: {}, uuid: {}, name: {}]", _id, _uuid, _name); disconnect(Status.Alert); } } @@ -140,7 +140,8 @@ public class DirectAgentAttache extends AgentAttache { } private synchronized void scheduleFromQueue() { - logger.trace("Agent attache={}, task queue size={}, outstanding tasks={}", _id, tasks.size(), _outstandingTaskCount.get()); + logger.trace("Agent attache [id: {}, uuid: {}, name: {}], task queue size={}, outstanding tasks={}", + _id, _uuid, _name, tasks.size(), _outstandingTaskCount.get()); while (!tasks.isEmpty() && _outstandingTaskCount.get() < _agentMgr.getDirectAgentThreadCap()) { _outstandingTaskCount.incrementAndGet(); _agentMgr.getDirectAgentPool().execute(tasks.remove()); @@ -152,7 +153,9 @@ public class DirectAgentAttache extends AgentAttache { protected synchronized void runInContext() { try { if (_outstandingCronTaskCount.incrementAndGet() >= _agentMgr.getDirectAgentThreadCap()) { - logger.warn("PingTask execution for direct attache({}) has reached maximum outstanding limit({}), bailing out", _id, _agentMgr.getDirectAgentThreadCap()); + logger.warn( + "PingTask execution for direct attache [id: {}, uuid: {}, name: {}] has reached maximum outstanding limit({}), bailing out", + _id, _uuid, _name, _agentMgr.getDirectAgentThreadCap()); return; } @@ -167,21 +170,21 @@ public class DirectAgentAttache extends AgentAttache { } if (cmd == null) { - logger.warn("Unable to get current status on {}({})", _id, _name); + 
logger.warn("Unable to get current status on [id: {}, uuid: {}, name: {}]", _id, _uuid, _name); return; } if (cmd.getContextParam("logid") != null) { ThreadContext.put("logcontextid", cmd.getContextParam("logid")); } - logger.debug("Ping from {}({})", _id, _name); + logger.debug("Ping from [id: {}, uuid: {}, name: {}]", _id, _uuid, _name); long seq = _seq++; logger.trace("SeqA {}-{}: {}", _id, seq, new Request(_id, -1, cmd, false).toString()); _agentMgr.handleCommands(DirectAgentAttache.this, seq, new Command[] {cmd}); } else { - logger.debug("Unable to send ping because agent is disconnected {}", _id, _name); + logger.debug("Unable to send ping because agent is disconnected [id: {}, uuid: {}, name: {}]", _id, _uuid, _name); } } catch (Exception e) { logger.warn("Unable to complete the ping task", e); @@ -219,7 +222,9 @@ public class DirectAgentAttache extends AgentAttache { long seq = _req.getSequence(); try { if (_outstandingCronTaskCount.incrementAndGet() >= _agentMgr.getDirectAgentThreadCap()) { - logger.warn("CronTask execution for direct attache({}) has reached maximum outstanding limit({}), bailing out", _id, _agentMgr.getDirectAgentThreadCap()); + logger.warn( + "CronTask execution for direct attache [id: {}, uuid: {}, name: {}] has reached maximum outstanding limit({}), bailing out", + _id, _uuid, _name, _agentMgr.getDirectAgentThreadCap()); bailout(); return; } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/DummyAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/DummyAttache.java index 7ee524076bb..2f15e7af43c 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/DummyAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/DummyAttache.java @@ -22,8 +22,8 @@ import com.cloud.host.Status; public class DummyAttache extends AgentAttache { - public DummyAttache(AgentManagerImpl agentMgr, long id, String name, boolean maintenance) { - super(agentMgr, id, name, maintenance); 
+ public DummyAttache(AgentManagerImpl agentMgr, long id, String uuid, String name, boolean maintenance) { + super(agentMgr, id, uuid, name, maintenance); } @Override diff --git a/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/AgentLoadBalancerPlanner.java b/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/AgentLoadBalancerPlanner.java index 7d139e5be14..e73776d134d 100644 --- a/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/AgentLoadBalancerPlanner.java +++ b/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/AgentLoadBalancerPlanner.java @@ -18,11 +18,12 @@ package com.cloud.cluster.agentlb; import java.util.List; +import com.cloud.cluster.ManagementServerHostVO; import com.cloud.host.HostVO; import com.cloud.utils.component.Adapter; public interface AgentLoadBalancerPlanner extends Adapter { - List getHostsToRebalance(long msId, int avLoad); + List getHostsToRebalance(ManagementServerHostVO ms, int avLoad); } diff --git a/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java b/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java index 641ae441480..5b05b4df042 100644 --- a/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java +++ b/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java @@ -26,6 +26,7 @@ import java.util.Map; import javax.inject.Inject; +import com.cloud.cluster.ManagementServerHostVO; import org.springframework.stereotype.Component; import com.cloud.host.Host; @@ -43,15 +44,17 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements HostDao _hostDao = null; @Override - public List getHostsToRebalance(long msId, int avLoad) { + public List getHostsToRebalance(ManagementServerHostVO ms, int avLoad) { + long msId = ms.getMsid(); QueryBuilder sc = QueryBuilder.create(HostVO.class); 
sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing); sc.and(sc.entity().getManagementServerId(), Op.EQ, msId); List allHosts = sc.list(); if (allHosts.size() <= avLoad) { - logger.debug("Agent load = " + allHosts.size() + " for management server " + msId + " doesn't exceed average system agent load = " + avLoad + - "; so it doesn't participate in agent rebalancing process"); + logger.debug("Agent load = {} for management server {} doesn't exceed average " + + "system agent load = {}; so it doesn't participate in agent rebalancing process", + allHosts.size(), ms, avLoad); return null; } @@ -62,8 +65,9 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements List directHosts = sc.list(); if (directHosts.isEmpty()) { - logger.debug("No direct agents in status " + Status.Up + " exist for the management server " + msId + - "; so it doesn't participate in agent rebalancing process"); + logger.debug("No direct agents in status {} exist for the management server " + + "{}; so it doesn't participate in agent rebalancing process", + Status.Up, ms); return null; } @@ -88,8 +92,9 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements int hostsLeft = directHosts.size(); List hostsToReturn = new ArrayList(); - logger.debug("Management server " + msId + " can give away " + hostsToGive + " as it currently owns " + allHosts.size() + - " and the average agent load in the system is " + avLoad + "; finalyzing list of hosts to give away..."); + logger.debug("Management server {} can give away {} as it currently owns {} and the " + + "average agent load in the system is {}; finalizing list of hosts to give away...", + ms, hostsToGive, allHosts.size(), avLoad); for (Long cluster : hostToClusterMap.keySet()) { List hostsInCluster = hostToClusterMap.get(cluster); hostsLeft = hostsLeft - hostsInCluster.size(); @@ -113,7 +118,7 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements } } -
logger.debug("Management server " + msId + " is ready to give away " + hostsToReturn.size() + " hosts"); + logger.debug("Management server {} is ready to give away {} hosts", ms, hostsToReturn.size()); return hostsToReturn; } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 7c107ed6f54..a8b0130bdbc 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -480,7 +480,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final LinkedHashMap> auxiliaryNetworks, final DeploymentPlan plan, final HypervisorType hyperType, final Map> extraDhcpOptions, final Map datadiskTemplateToDiskOfferingMap) throws InsufficientCapacityException { - logger.info("allocating virtual machine from template:{} with hostname:{} and {} networks", template.getUuid(), vmInstanceName, auxiliaryNetworks.size()); + logger.info("allocating virtual machine from template: {} with hostname: {} and {} networks", template, vmInstanceName, auxiliaryNetworks.size()); VMInstanceVO persistedVm = null; try { final VMInstanceVO vm = _vmDao.findVMByInstanceName(vmInstanceName); @@ -1196,8 +1196,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final Long clusterIdSpecified = planToDeploy.getClusterId(); if (clusterIdSpecified != null && rootVolClusterId != null) { if (!rootVolClusterId.equals(clusterIdSpecified)) { - logger.debug("Cannot satisfy the deployment plan passed in since the ready Root volume is in different cluster. volume's cluster: " + - rootVolClusterId + ", cluster specified: " + clusterIdSpecified); + logger.debug("Cannot satisfy the deployment plan passed in since " + + "the ready Root volume is in different cluster. 
volume's cluster: {}, cluster specified: {}", + () -> _clusterDao.findById(rootVolClusterId), () -> _clusterDao.findById(clusterIdSpecified)); throw new ResourceUnavailableException( "Root volume is ready in different cluster, Deployment plan provided cannot be satisfied, unable to create a deployment for " + vm, Cluster.class, clusterIdSpecified); @@ -1320,8 +1321,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac syncDiskChainChange(startAnswer); if (!changeState(vm, Event.OperationSucceeded, destHostId, work, Step.Done)) { - logger.error("Unable to transition to a new state. VM uuid: {}, VM oldstate: {}, Event: {}", vm.getUuid(), vm.getState(), Event.OperationSucceeded); - throw new ConcurrentOperationException("Failed to deploy VM"+ vm.getUuid()); + logger.error("Unable to transition to a new state. VM uuid: {}, VM oldstate: {}, Event: {}", vm, vm.getState(), Event.OperationSucceeded); + throw new ConcurrentOperationException(String.format("Failed to deploy VM %s", vm)); } final GPUDeviceTO gpuDevice = startAnswer.getVirtualMachine().getGpuDevice(); @@ -1348,10 +1349,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } return; } catch (final Exception e) { - logger.error("Retrying after catching exception while trying to secure agent for systemvm id={}", vm.getId(), e); + logger.error("Retrying after catching exception while trying to secure agent for systemvm {}", vm, e); } } - throw new CloudRuntimeException("Failed to setup and secure agent for systemvm id=" + vm.getId()); + throw new CloudRuntimeException(String.format("Failed to setup and secure agent for systemvm %s", vm)); } return; } else { @@ -1390,7 +1391,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } catch (OperationTimedoutException e) { - logger.debug("Unable to send the start command to host {} failed to start VM: {}", dest.getHost(), vm.getUuid()); + logger.debug("Unable to send the 
start command to host {} failed to start VM: {}", dest.getHost(), vm); if (e.isActive()) { _haMgr.scheduleStop(vm, destHostId, WorkType.CheckStop); } @@ -1745,7 +1746,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final List pendingWorkJobs = _workJobDao.listPendingWorkJobs(VirtualMachine.Type.Instance, vm.getId()); if (CollectionUtils.isNotEmpty(pendingWorkJobs) || _haMgr.hasPendingHaWork(vm.getId())) { - String msg = "There are pending jobs or HA tasks working on the VM with id: " + vm.getId() + ", can't unmanage the VM."; + String msg = String.format("There are pending jobs or HA tasks working on the VM: %s, can't unmanage the VM.", vm); logger.info(msg); throw new ConcurrentOperationException(msg); } @@ -2124,8 +2125,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } else { HostVO host = _hostDao.findById(hostId); if (!cleanUpEvenIfUnableToStop && vm.getState() == State.Running && host.getResourceState() == ResourceState.PrepareForMaintenance) { - logger.debug("Host is in PrepareForMaintenance state - Stop VM operation on the VM id: {} is not allowed", vm.getId()); - throw new CloudRuntimeException("Stop VM operation on the VM id: " + vm.getId() + " is not allowed as host is preparing for maintenance mode"); + logger.debug("Host is in PrepareForMaintenance state - Stop VM operation on the VM: {} is not allowed", vm); + throw new CloudRuntimeException(String.format("Stop VM operation on the VM %s is not allowed as host is preparing for maintenance mode", vm)); } } @@ -2509,7 +2510,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac List volumes = _volsDao.findUsableVolumesForInstance(vm.getId()); logger.debug("Found {} volumes for VM {}(uuid:{}, id:{})", results.size(), vm.getInstanceName(), vm.getUuid(), vm.getId()); for (VolumeObjectTO result : results ) { - logger.debug("Updating volume ({}) with path '{}' on pool '{}'", result.getId(), 
result.getPath(), result.getDataStoreUuid()); + logger.debug("Updating volume ({}) with path '{}' on pool '{}'", result.getUuid(), result.getPath(), result.getDataStoreUuid()); VolumeVO volume = _volsDao.findById(result.getId()); StoragePool pool = _storagePoolDao.findPoolByUUID(result.getDataStoreUuid()); if (volume == null || pool == null) { @@ -2660,14 +2661,15 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private void removeStaleVmFromSource(VMInstanceVO vm, HostVO srcHost) { logger.debug("Since VM's storage was successfully migrated across VMware Datacenters, unregistering VM: {} from source host: {}", - vm.getInstanceName(), srcHost.getId()); + vm, srcHost); final UnregisterVMCommand uvc = new UnregisterVMCommand(vm.getInstanceName()); uvc.setCleanupVmFiles(true); try { _agentMgr.send(srcHost.getId(), uvc); } catch (AgentUnavailableException | OperationTimedoutException e) { - throw new CloudRuntimeException("Failed to unregister VM: " + vm.getInstanceName() + " from source host: " + srcHost.getId() + - " after successfully migrating VM's storage across VMware Datacenters", e); + throw new CloudRuntimeException(String.format( + "Failed to unregister VM: %s from source host: %s after successfully migrating VM's storage across VMware Datacenters", + vm, srcHost), e); } } @@ -2722,10 +2724,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac for (final VolumeVO volume : volumes) { if (!_storagePoolDao.findById(volume.getPoolId()).getScope().equals(ScopeType.ZONE)) { logger.info("Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: {}", - dest.getHost().getId()); - throw new CloudRuntimeException( - "Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: " - + dest.getHost().getId()); + dest.getHost()); + throw new 
CloudRuntimeException(String.format( + "Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: %s", + dest.getHost())); } } } @@ -2852,13 +2854,13 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { _agentMgr.send(srcHostId, new Commands(cleanup(vm, dpdkInterfaceMapping)), null); } catch (final AgentUnavailableException e) { - logger.error("AgentUnavailableException while cleanup on source host: {}", srcHostId, e); + logger.error("AgentUnavailableException while cleanup on source host: {}", fromHost, e); } cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true); throw new CloudRuntimeException("Unable to complete migration for " + vm); } } catch (final OperationTimedoutException e) { - logger.warn("Error while checking the vm {} on host {}", vm, dstHostId, e); + logger.warn("Error while checking the vm {} on host {}", vm, dest.getHost(), e); } migrated = true; } finally { @@ -3302,7 +3304,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { _agentMgr.send(srcHostId, new Commands(cleanup(vm.getInstanceName())), null); } catch (final AgentUnavailableException e) { - logger.error("AgentUnavailableException while cleanup on source host: {}", srcHostId, e); + logger.error("AgentUnavailableException while cleanup on source host: {}", srcHost, e); } cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true); throw new CloudRuntimeException("VM not found on destination host. Unable to complete migration for " + vm); @@ -3834,9 +3836,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac return; } - logger.debug("Received startup command from hypervisor host. host id: {}", agent.getId()); + logger.debug("Received startup command from hypervisor host. 
host: {}", agent); - _syncMgr.resetHostSyncState(agent.getId()); + _syncMgr.resetHostSyncState(agent); if (forRebalance) { logger.debug("Not processing listener {} as connect happens on rebalance process", this); @@ -3851,7 +3853,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final long seq_no = _agentMgr.send(agentId, new Commands(syncVMMetaDataCmd), this); logger.debug("Cluster VM metadata sync started with jobid {}", seq_no); } catch (final AgentUnavailableException e) { - logger.fatal("The Cluster VM metadata sync process failed for cluster id {} with {}", clusterId, e); + logger.fatal("The Cluster VM metadata sync process failed for cluster {} with {}", _clusterDao.findById(clusterId), e); } } } @@ -4224,10 +4226,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac logger.debug("Not need to remove the vm {} from network {} as the vm doesn't have nic in this network.", vm, network); return true; } - throw new ConcurrentOperationException("Unable to lock nic " + nic.getId()); + throw new ConcurrentOperationException(String.format("Unable to lock nic %s", nic)); } - logger.debug("Lock is acquired for nic id {} as a part of remove vm {} from network {}", lock.getId(), vm, network); + logger.debug("Lock is acquired for nic {} as a part of remove vm {} from network {}", lock, vm, network); try { final NicProfile nicProfile = @@ -4256,7 +4258,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac return true; } finally { _nicsDao.releaseFromLockTable(lock.getId()); - logger.debug("Lock is released for nic id {} as a part of remove vm {} from network {}", lock.getId(), vm, network); + logger.debug("Lock is released for nic {} as a part of remove vm {} from network {}", lock, vm, network); } } @@ -4348,9 +4350,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac vm.getServiceOfferingId(); final long dstHostId = dest.getHost().getId(); 
final Host fromHost = _hostDao.findById(srcHostId); - Host srcHost = _hostDao.findById(srcHostId); if (fromHost == null) { - String logMessageUnableToFindHost = String.format("Unable to find host to migrate from %s.", srcHost); + String logMessageUnableToFindHost = String.format("Unable to find host to migrate from %s.", srcHostId); logger.info(logMessageUnableToFindHost); throw new CloudRuntimeException(logMessageUnableToFindHost); } @@ -4359,7 +4360,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac long destHostClusterId = dest.getCluster().getId(); long fromHostClusterId = fromHost.getClusterId(); if (fromHostClusterId != destHostClusterId) { - String logMessageHostsOnDifferentCluster = String.format("Source and destination host are not in same cluster, unable to migrate to %s", srcHost); + String logMessageHostsOnDifferentCluster = String.format("Source and destination host are not in same cluster, unable to migrate to %s", fromHost); logger.info(logMessageHostsOnDifferentCluster); throw new CloudRuntimeException(logMessageHostsOnDifferentCluster); } @@ -4406,7 +4407,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (pfma == null || !pfma.getResult()) { final String details = pfma != null ? 
pfma.getDetails() : "null answer returned"; pfma = null; - throw new AgentUnavailableException(String.format("Unable to prepare for migration to destination host [%s] due to [%s].", dstHostId, details), dstHostId); + throw new AgentUnavailableException(String.format("Unable to prepare for migration to destination host [%s] due to [%s].", dest.getHost(), details), dstHostId); } } catch (final OperationTimedoutException e1) { throw new AgentUnavailableException("Operation timed out", dstHostId); @@ -4466,7 +4467,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { _agentMgr.send(srcHostId, new Commands(cleanup(vm.getInstanceName())), null); } catch (final AgentUnavailableException e) { - logger.error("Unable to cleanup source host [{}] due to [{}].", srcHostId, e.getMessage(), e); + logger.error("Unable to cleanup source host [{}] due to [{}].", fromHost, e.getMessage(), e); } cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true); throw new CloudRuntimeException("Unable to complete migration for " + vm); @@ -4801,7 +4802,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac logger.warn("VM {} no longer exists when processing VM state report.", vmId); } } else { - logger.info("There is pending job or HA tasks working on the VM. vm id: {}, postpone power-change report by resetting power-change counters.", vmId ); + logger.info("There is pending job or HA tasks working on the VM. 
vm: {}, postpone power-change report by resetting power-change counters.", () -> _vmDao.findById(vmId)); _vmDao.resetVmPowerStateTracking(vmId); } } @@ -4842,7 +4843,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac case Running: try { if (vm.getHostId() != null && !vm.getHostId().equals(vm.getPowerHostId())) { - logger.info("Detected out of band VM migration from host " + vm.getHostId() + " to host " + vm.getPowerHostId()); + logger.info("Detected out of band VM migration from host {} to host {}", () -> _hostDao.findById(vm.getHostId()), () -> _hostDao.findById(vm.getPowerHostId())); } stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId()); } catch (final NoTransitionException e) { @@ -4871,22 +4872,22 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac case Destroyed: case Expunging: - logger.info("Receive power on report when VM is in destroyed or expunging state. vm: {}, state: {}.", vm.getId(), vm.getState()); + logger.info("Receive power on report when VM is in destroyed or expunging state. vm: {}, state: {}.", vm, vm.getState()); break; case Migrating: - logger.info("VM {} is at {} and we received a power-on report while there is no pending jobs on it.", vm.getInstanceName(), vm.getState()); + logger.info("VM {} is at {} and we received a power-on report while there is no pending jobs on it.", vm, vm.getState()); try { stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId()); } catch (final NoTransitionException e) { logger.warn("Unexpected VM state transition exception, race-condition?", e); } - logger.info("VM {} is sync-ed to at Running state according to power-on report from hypervisor.", vm.getInstanceName()); + logger.info("VM {} is sync-ed to at Running state according to power-on report from hypervisor.", vm); break; case Error: default: - logger.info("Receive power on report when VM is in error or unexpected state. 
vm: {}, state: {}.", vm.getId(), vm.getState()); + logger.info("Receive power on report when VM is in error or unexpected state. vm: {}, state: {}.", vm, vm.getState()); break; } } @@ -4901,16 +4902,16 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac EventTypes.EVENT_VM_STOP, "Out of band VM power off", vm.getId(), getApiCommandResourceTypeForVm(vm).toString()); case Migrating: logger.info("VM {} is at {} and we received a {} report while there is no pending jobs on it" - , vm.getInstanceName(), vm.getState(), vm.getPowerState()); + , vm, vm.getState(), vm.getPowerState()); if((HighAvailabilityManager.ForceHA.value() || vm.isHaEnabled()) && vm.getState() == State.Running && HaVmRestartHostUp.value() && vm.getHypervisorType() != HypervisorType.VMware && vm.getHypervisorType() != HypervisorType.Hyperv) { - logger.info("Detected out-of-band stop of a HA enabled VM {}, will schedule restart.", vm.getInstanceName()); + logger.info("Detected out-of-band stop of a HA enabled VM {}, will schedule restart.", vm); if (!_haMgr.hasPendingHaWork(vm.getId())) { _haMgr.scheduleRestart(vm, true); } else { - logger.info("VM {} already has a pending HA task working on it.", vm.getInstanceName()); + logger.info("VM {} already has a pending HA task working on it.", vm); } return; } @@ -4937,10 +4938,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SYNC, vm.getDataCenterId(), vm.getPodIdToDeployIn(), - VM_SYNC_ALERT_SUBJECT, "VM " + vm.getHostName() + "(" + vm.getInstanceName() + ") state is sync-ed (" + vm.getState() - + " -> Stopped) from out-of-context transition."); + VM_SYNC_ALERT_SUBJECT, String.format("VM %s(%s) state is sync-ed (%s -> Stopped) from out-of-context transition.", + vm.getHostName(), vm, vm.getState())); - logger.info("VM {} is sync-ed to at Stopped state according to power-off report from hypervisor.", vm.getInstanceName()); + 
logger.info("VM {} is sync-ed to at Stopped state according to power-off report from hypervisor.", vm); break; @@ -4983,8 +4984,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final VMInstanceVO vm = _vmDao.findById(vmId); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SYNC, vm.getDataCenterId(), vm.getPodIdToDeployIn(), - VM_SYNC_ALERT_SUBJECT, "VM " + vm.getHostName() + "(" + vm.getInstanceName() + ") is stuck in " + vm.getState() - + " state and its host is unreachable for too long"); + VM_SYNC_ALERT_SUBJECT, String.format("VM %s(%s) is stuck in %s state and its host is unreachable for too long", + vm.getHostName(), vm, vm.getState())); } } @@ -5502,7 +5503,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { orchestrateMigrateAway(vm.getUuid(), work.getSrcHostId(), null); } catch (final InsufficientServerCapacityException e) { - logger.warn("Failed to deploy vm {} with original planner, sending HAPlanner.", vm.getId(), e); + logger.warn("Failed to deploy vm {} with original planner, sending HAPlanner.", vm, e); orchestrateMigrateAway(vm.getUuid(), work.getSrcHostId(), _haMgr.getHAPlanner()); } @@ -5794,18 +5795,20 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac @Override public Pair findClusterAndHostIdForVm(VirtualMachine vm, boolean skipCurrentHostForStartingVm) { Long hostId = null; + Host host = null; if (!skipCurrentHostForStartingVm || !State.Starting.equals(vm.getState())) { hostId = vm.getHostId(); } Long clusterId = null; - if(hostId == null) { - hostId = vm.getLastHostId(); - logger.debug("host id is null, using last host id {}", hostId); - } if (hostId == null) { - return findClusterAndHostIdForVmFromVolumes(vm.getId()); + if (vm.getLastHostId() == null) { + return findClusterAndHostIdForVmFromVolumes(vm.getId()); + } + hostId = vm.getLastHostId(); + host = _hostDao.findById(hostId); + logger.debug("host id is null, using last host 
{} with id {}", host, hostId); } - HostVO host = _hostDao.findById(hostId); + host = host == null ? _hostDao.findById(hostId) : host; if (host != null) { clusterId = host.getClusterId(); } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSync.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSync.java index b2a48a026a3..0f399cf4381 100644 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSync.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSync.java @@ -19,15 +19,14 @@ package com.cloud.vm; import java.util.Map; import com.cloud.agent.api.HostVmStateReportEntry; +import com.cloud.host.Host; public interface VirtualMachinePowerStateSync { - void resetHostSyncState(long hostId); + void resetHostSyncState(Host hostId); void processHostVmStateReport(long hostId, Map report); // to adapt legacy ping report void processHostVmStatePingReport(long hostId, Map report, boolean force); - - Map convertVmStateReport(Map states); } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java index 4c89a75d215..94dddfdf18a 100644 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java @@ -24,6 +24,10 @@ import java.util.Map; import javax.inject.Inject; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.utils.Pair; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.PublishScope; import org.apache.logging.log4j.Logger; @@ -40,54 +44,57 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat @Inject MessageBus _messageBus; @Inject VMInstanceDao _instanceDao; 
+ @Inject HostDao hostDao; @Inject ManagementServiceConfiguration mgmtServiceConf; public VirtualMachinePowerStateSyncImpl() { } @Override - public void resetHostSyncState(long hostId) { - logger.info("Reset VM power state sync for host: {}.", hostId); - _instanceDao.resetHostPowerStateTracking(hostId); + public void resetHostSyncState(Host host) { + logger.info("Reset VM power state sync for host: {}", host); + _instanceDao.resetHostPowerStateTracking(host.getId()); } @Override public void processHostVmStateReport(long hostId, Map report) { - logger.debug("Process host VM state report. host: {}.", hostId); + HostVO host = hostDao.findById(hostId); + logger.debug("Process host VM state report. host: {}", host); - Map translatedInfo = convertVmStateReport(report); - processReport(hostId, translatedInfo, false); + Map> translatedInfo = convertVmStateReport(report); + processReport(host, translatedInfo, false); } @Override public void processHostVmStatePingReport(long hostId, Map report, boolean force) { - logger.debug("Process host VM state report from ping process. host: {}.", hostId); + HostVO host = hostDao.findById(hostId); + logger.debug("Process host VM state report from ping process. host: {}", host); - Map translatedInfo = convertVmStateReport(report); - processReport(hostId, translatedInfo, force); + Map> translatedInfo = convertVmStateReport(report); + processReport(host, translatedInfo, force); } - private void processReport(long hostId, Map translatedInfo, boolean force) { + private void processReport(HostVO host, Map> translatedInfo, boolean force) { - logger.debug("Process VM state report. host: {}, number of records in report: {}.", hostId, translatedInfo.size()); + logger.debug("Process VM state report. host: {}, number of records in report: {}.", host, translatedInfo.size()); - for (Map.Entry entry : translatedInfo.entrySet()) { + for (Map.Entry> entry : translatedInfo.entrySet()) { - logger.debug("VM state report. 
host: {}, vm id: {}, power state: {}.", hostId, entry.getKey(), entry.getValue()); + logger.debug("VM state report. host: {}, vm: {}, power state: {}", host, entry.getValue().second(), entry.getValue().first()); - if (_instanceDao.updatePowerState(entry.getKey(), hostId, entry.getValue(), DateUtil.currentGMTTime())) { - logger.debug("VM state report is updated. host: {}, vm id: {}, power state: {}.", hostId, entry.getKey(), entry.getValue()); + if (_instanceDao.updatePowerState(entry.getKey(), host.getId(), entry.getValue().first(), DateUtil.currentGMTTime())) { + logger.debug("VM state report is updated. host: {}, vm: {}, power state: {}", host, entry.getValue().second(), entry.getValue().first()); _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, entry.getKey()); } else { - logger.trace("VM power state does not change, skip DB writing. vm id: {}.", entry.getKey()); + logger.trace("VM power state does not change, skip DB writing. vm: {}", entry.getValue().second()); } } // any state outdates should be checked against the time before this list was retrieved Date startTime = DateUtil.currentGMTTime(); // for all running/stopping VMs, we provide monitoring of missing report - List vmsThatAreMissingReport = _instanceDao.findByHostInStates(hostId, VirtualMachine.State.Running, + List vmsThatAreMissingReport = _instanceDao.findByHostInStates(host.getId(), VirtualMachine.State.Running, VirtualMachine.State.Stopping, VirtualMachine.State.Starting); java.util.Iterator it = vmsThatAreMissingReport.iterator(); while (it.hasNext()) { @@ -99,7 +106,7 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat // here we need to be wary of out of band migration as opposed to other, more unexpected state changes if (vmsThatAreMissingReport.size() > 0) { Date currentTime = DateUtil.currentGMTTime(); - logger.debug("Run missing VM report. 
current time: {}", currentTime.getTime()); + logger.debug("Run missing VM report for host {}. current time: {}", host, currentTime.getTime()); // 2 times of sync-update interval for graceful period long milliSecondsGracefullPeriod = mgmtServiceConf.getPingInterval() * 2000L; @@ -109,60 +116,55 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat // Make sure powerState is up to date for missing VMs try { if (!force && !_instanceDao.isPowerStateUpToDate(instance.getId())) { - logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM id: {}.", instance.getId()); + logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM: {}", instance); _instanceDao.resetVmPowerStateTracking(instance.getId()); continue; } } catch (CloudRuntimeException e) { - logger.warn("Checked for missing powerstate of a none existing vm", e); + logger.warn("Checked for missing powerstate of a none existing vm {}", instance, e); continue; } Date vmStateUpdateTime = instance.getPowerStateUpdateTime(); if (vmStateUpdateTime == null) { - logger.warn("VM power state update time is null, falling back to update time for vm id: {}.", instance.getId()); + logger.warn("VM power state update time is null, falling back to update time for vm: {}", instance); vmStateUpdateTime = instance.getUpdateTime(); if (vmStateUpdateTime == null) { - logger.warn("VM update time is null, falling back to creation time for vm id: {}", instance.getId()); + logger.warn("VM update time is null, falling back to creation time for vm: {}", instance); vmStateUpdateTime = instance.getCreated(); } } String lastTime = new SimpleDateFormat("yyyy/MM/dd'T'HH:mm:ss.SSS'Z'").format(vmStateUpdateTime); - logger.debug("Detected missing VM. 
host: {}, vm id: {}({}), power state: {}, last state update: {}" - , hostId - , instance.getId() - , instance.getUuid() - , VirtualMachine.PowerState.PowerReportMissing - , lastTime); + logger.debug("Detected missing VM. host: {}, vm: {}, power state: {}, last state update: {}", + host, instance, VirtualMachine.PowerState.PowerReportMissing, lastTime); long milliSecondsSinceLastStateUpdate = currentTime.getTime() - vmStateUpdateTime.getTime(); if (force || milliSecondsSinceLastStateUpdate > milliSecondsGracefullPeriod) { - logger.debug("vm id: {} - time since last state update({}ms) has passed graceful period.", instance.getId(), milliSecondsSinceLastStateUpdate); + logger.debug("vm: {} - time since last state update({}ms) has passed graceful period", instance, milliSecondsSinceLastStateUpdate); // this is were a race condition might have happened if we don't re-fetch the instance; // between the startime of this job and the currentTime of this missing-branch // an update might have occurred that we should not override in case of out of band migration - if (_instanceDao.updatePowerState(instance.getId(), hostId, VirtualMachine.PowerState.PowerReportMissing, startTime)) { - logger.debug("VM state report is updated. host: {}, vm id: {}, power state: PowerReportMissing.", hostId, instance.getId()); + if (_instanceDao.updatePowerState(instance.getId(), host.getId(), VirtualMachine.PowerState.PowerReportMissing, startTime)) { + logger.debug("VM state report is updated. host: {}, vm: {}, power state: PowerReportMissing ", host, instance); _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, instance.getId()); } else { - logger.debug("VM power state does not change, skip DB writing. vm id: {}", instance.getId()); + logger.debug("VM power state does not change, skip DB writing. 
vm: {}", instance); } } else { - logger.debug("vm id: {} - time since last state update({}ms) has not passed graceful period yet.", instance.getId(), milliSecondsSinceLastStateUpdate); + logger.debug("vm: {} - time since last state update({} ms) has not passed graceful period yet", instance, milliSecondsSinceLastStateUpdate); } } } - logger.debug("Done with process of VM state report. host: {}", hostId); + logger.debug("Done with process of VM state report. host: {}", host); } - @Override - public Map convertVmStateReport(Map states) { - final HashMap map = new HashMap(); + public Map> convertVmStateReport(Map states) { + final HashMap> map = new HashMap<>(); if (states == null) { return map; } @@ -170,9 +172,9 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat for (Map.Entry entry : states.entrySet()) { VMInstanceVO vm = findVM(entry.getKey()); if (vm != null) { - map.put(vm.getId(), entry.getValue().getState()); + map.put(vm.getId(), new Pair<>(entry.getValue().getState(), vm)); } else { - logger.debug("Unable to find matched VM in CloudStack DB. name: {}", entry.getKey()); + logger.debug("Unable to find matched VM in CloudStack DB. 
name: {} powerstate: {}", entry.getKey(), entry.getValue()); } } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VmWorkJobWakeupDispatcher.java b/engine/orchestration/src/main/java/com/cloud/vm/VmWorkJobWakeupDispatcher.java index b7c82ce5c21..1b050ffd9de 100644 --- a/engine/orchestration/src/main/java/com/cloud/vm/VmWorkJobWakeupDispatcher.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VmWorkJobWakeupDispatcher.java @@ -67,8 +67,8 @@ public class VmWorkJobWakeupDispatcher extends AdapterBase implements AsyncJobDi try { List joinRecords = _joinMapDao.listJoinRecords(job.getId()); if (joinRecords.size() != 1) { - logger.warn("AsyncJob-" + job.getId() - + " received wakeup call with un-supported joining job number: " + joinRecords.size()); + logger.warn("AsyncJob-{} ({}) received wakeup call with un-supported " + + "joining job number: {}", job.getId(), job, joinRecords.size()); // if we fail wakeup-execution for any reason, avoid release sync-source if there is any job.setSyncSource(null); @@ -82,7 +82,7 @@ public class VmWorkJobWakeupDispatcher extends AdapterBase implements AsyncJobDi try { workClz = Class.forName(job.getCmd()); } catch (ClassNotFoundException e) { - logger.error("VM work class " + job.getCmd() + " is not found", e); + logger.error("VM work class {} for job {} is not found", job.getCmd(), job, e); return; } @@ -103,14 +103,13 @@ public class VmWorkJobWakeupDispatcher extends AdapterBase implements AsyncJobDi handler.invoke(_vmMgr); } else { assert (false); - logger.error("Unable to find wakeup handler " + joinRecord.getWakeupHandler() + - " when waking up job-" + job.getId()); + logger.error("Unable to find wakeup handler {} when waking up job-{} ({})", joinRecord.getWakeupHandler(), job.getId(), job); } } finally { CallContext.unregister(); } } catch (Throwable e) { - logger.warn("Unexpected exception in waking up job-" + job.getId()); + logger.warn("Unexpected exception in waking up job-{} ({})", job.getId(), job); // if 
we fail wakeup-execution for any reason, avoid release sync-source if there is any job.setSyncSource(null); diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/DataCenterResourceManagerImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/DataCenterResourceManagerImpl.java index 41366f73a01..e48481324df 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/DataCenterResourceManagerImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/DataCenterResourceManagerImpl.java @@ -96,7 +96,7 @@ public class DataCenterResourceManagerImpl implements DataCenterResourceManager public EngineClusterVO loadCluster(String uuid) { EngineClusterVO cluster = _clusterDao.findByUuid(uuid); if (cluster == null) { - throw new InvalidParameterValueException("Pod does not exist"); + throw new InvalidParameterValueException("Cluster does not exist"); } return cluster; } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java index c00d939b3df..19b0e773cd0 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java @@ -29,6 +29,7 @@ import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEnti import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEntity.State.Event; import org.apache.cloudstack.util.CPUArchConverter; import org.apache.cloudstack.util.HypervisorTypeConverter; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import 
javax.persistence.Convert; @@ -264,4 +265,11 @@ public class EngineClusterVO implements EngineCluster, Identity { public PartitionType partitionType() { return PartitionType.Cluster; } + + @Override + public String toString() { + return String.format("EngineCluster %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineDataCenterVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineDataCenterVO.java index 57382530f40..5f1203c024a 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineDataCenterVO.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineDataCenterVO.java @@ -43,6 +43,7 @@ import com.cloud.org.Grouping; import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.GenericDao; import com.cloud.utils.db.StateMachine; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "data_center") @@ -523,4 +524,11 @@ public class EngineDataCenterVO implements EngineDataCenter, Identity { public DataCenter.Type getType() { return type; } + + @Override + public String toString() { + return String.format("EngineDataCenter %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java index 684b882fe8a..95931d5b72d 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java +++ 
b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java @@ -38,6 +38,7 @@ import com.cloud.org.Grouping; import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.GenericDao; import com.cloud.utils.db.StateMachine; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "host_pod_ref") @@ -246,4 +247,11 @@ public class EngineHostPodVO implements EnginePod, Identity { public State getState() { return state; } + + @Override + public String toString() { + return String.format("EngineHostPod %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java index d804f079e17..053d9ac218e 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java @@ -53,6 +53,7 @@ import com.cloud.utils.db.GenericDao; import com.cloud.utils.db.StateMachine; import org.apache.cloudstack.util.CPUArchConverter; import org.apache.cloudstack.util.HypervisorTypeConverter; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "host") @@ -697,7 +698,9 @@ public class EngineHostVO implements EngineHost, Identity { @Override public String toString() { - return new StringBuilder("Host[").append("-").append(id).append("-").append(type).append("]").toString(); + return String.format("EngineHost %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "type")); } public void setHypervisorType(HypervisorType hypervisorType) { diff --git 
a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java index cc33f9eb335..fa8b782f662 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java @@ -297,7 +297,7 @@ public class EngineClusterDaoImpl extends GenericDaoBase .append("; updatedTime=") .append(oldUpdatedTime); } else { - logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore"); + logger.debug("Unable to update dataCenter {} with id={}, as there is no such dataCenter exists in the database anymore", vo, vo.getId()); } } return rows > 0; diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineDataCenterDaoImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineDataCenterDaoImpl.java index 03b4bd9eaaf..96dfdc00d67 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineDataCenterDaoImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineDataCenterDaoImpl.java @@ -300,7 +300,7 @@ public class EngineDataCenterDaoImpl extends GenericDaoBase 0; diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java index 2099ebadb9f..2ad8d15d0b7 100644 --- 
a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java @@ -451,7 +451,7 @@ public class EngineHostDaoImpl extends GenericDaoBase implem .append("; updatedTime=") .append(oldUpdatedTime); } else { - logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore"); + logger.debug("Unable to update dataCenter: {}, as there is no such dataCenter exists in the database anymore", vo); } } return rows > 0; diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostPodDaoImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostPodDaoImpl.java index 535e396a376..58bbfcfc1a1 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostPodDaoImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostPodDaoImpl.java @@ -183,7 +183,7 @@ public class EngineHostPodDaoImpl extends GenericDaoBase .append("; updatedTime=") .append(oldUpdatedTime); } else { - logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore"); + logger.debug("Unable to update dataCenter: {}, as there is no such dataCenter exists in the database anymore", vo); } } return rows > 0; diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java index e1b798d16d6..7efc29b02a6 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java +++ 
b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java @@ -1248,18 +1248,18 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra VlanVO vlanVo = _vlanDao.findByNetworkIdAndIpv4(network.getId(), requestedIpv4Address); if (vlanVo == null) { - throw new InvalidParameterValueException(String.format("Trying to configure a Nic with the requested [IPv4='%s'] but cannot find a Vlan for the [network id='%s']", - requestedIpv4Address, network.getId())); + throw new InvalidParameterValueException(String.format("Trying to configure a Nic with the requested [IPv4='%s'] but cannot find a Vlan for the [network '%s']", + requestedIpv4Address, network)); } String ipv4Gateway = vlanVo.getVlanGateway(); String ipv4Netmask = vlanVo.getVlanNetmask(); if (!NetUtils.isValidIp4(ipv4Gateway)) { - throw new InvalidParameterValueException(String.format("The [IPv4Gateway='%s'] from [VlanId='%s'] is not valid", ipv4Gateway, vlanVo.getId())); + throw new InvalidParameterValueException(String.format("The [IPv4Gateway='%s'] from [Vlan id=%d uuid=%s] is not valid", ipv4Gateway, vlanVo.getId(), vlanVo.getUuid())); } if (!NetUtils.isValidIp4Netmask(ipv4Netmask)) { - throw new InvalidParameterValueException(String.format("The [IPv4Netmask='%s'] from [VlanId='%s'] is not valid", ipv4Netmask, vlanVo.getId())); + throw new InvalidParameterValueException(String.format("The [IPv4Netmask='%s'] from [Vlan id=%d uuid=%s] is not valid", ipv4Netmask, vlanVo.getId(), vlanVo.getUuid())); } acquireLockAndCheckIfIpv4IsFree(network, requestedIpv4Address); @@ -1273,7 +1273,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra String macAddress = _networkModel.getNextAvailableMacAddressInNetwork(network.getId()); nicProfile.setMacAddress(macAddress); } catch (InsufficientAddressCapacityException e) { - throw new CloudRuntimeException(String.format("Cannot get next available mac address in [network 
id='%s']", network.getId()), e); + throw new CloudRuntimeException(String.format("Cannot get next available mac address in [network %s]", network), e); } } } @@ -1285,7 +1285,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra IPAddressVO ipVO = _ipAddressDao.findByIpAndSourceNetworkId(network.getId(), requestedIpv4Address); if (ipVO == null) { throw new InvalidParameterValueException( - String.format("Cannot find IPAddressVO for guest [IPv4 address='%s'] and [network id='%s']", requestedIpv4Address, network.getId())); + String.format("Cannot find IPAddressVO for guest [IPv4 address='%s'] and [network %s]", requestedIpv4Address, network)); } try { IPAddressVO lockedIpVO = _ipAddressDao.acquireInLockTable(ipVO.getId()); @@ -1489,17 +1489,17 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra final SetupPersistentNetworkAnswer answer = (SetupPersistentNetworkAnswer) _agentMgr.send(host.getId(), cmd); if (answer == null) { - logger.warn("Unable to get an answer to the SetupPersistentNetworkCommand from agent: {}", host.getId()); + logger.warn("Unable to get an answer to the SetupPersistentNetworkCommand from agent: {}", host); clusterToHostsMap.get(host.getClusterId()).remove(host.getId()); continue; } if (!answer.getResult()) { - logger.warn("Unable to setup agent {} due to {}", host.getId(), answer.getDetails()); + logger.warn("Unable to setup agent {} due to {}", host, answer.getDetails()); clusterToHostsMap.get(host.getClusterId()).remove(host.getId()); } } catch (Exception e) { - logger.warn("Failed to connect to host: {}", host.getName()); + logger.warn("Failed to connect to host: {}", host); } } if (clusterToHostsMap.keySet().size() != clusterVOs.size()) { @@ -1526,7 +1526,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra NetworkVO network = _networksDao.findById(networkId); final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, 
network.getGuruName()); if (isNetworkImplemented(network)) { - logger.debug("Network id={} is already implemented", networkId); + logger.debug("Network {} is already implemented", network); implemented.set(guru, network); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_UPDATE, network.getAccountId(), network.getDataCenterId(), network.getId(), network.getName(), network.getNetworkOfferingId(), null, network.getState().name(), Network.class.getName(), network.getUuid(), true); @@ -1542,11 +1542,11 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra throw ex; } - logger.debug("Lock is acquired for network id {} as a part of network implement", networkId); + logger.debug("Lock is acquired for network {} as a part of network implement", network); try { if (isNetworkImplemented(network)) { - logger.debug("Network id={} is already implemented", networkId); + logger.debug("Network {} is already implemented", network); implemented.set(guru, network); return implemented; } @@ -1618,7 +1618,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } _networksDao.releaseFromLockTable(networkId); - logger.debug("Lock is released for network id {} as a part of network implement", networkId); + logger.debug("Lock is released for network {} as a part of network implement", network); } } @@ -1743,57 +1743,57 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra _firewallMgr.applyDefaultEgressFirewallRule(network.getId(), offering.isEgressDefaultPolicy(), true); } if (!_firewallMgr.applyFirewallRules(firewallEgressRulesToApply, false, caller)) { - logger.warn("Failed to reapply firewall Egress rule(s) as a part of network id={} restart", networkId); + logger.warn("Failed to reapply firewall Egress rule(s) as a part of network {} restart", network); success = false; } // associate all ip addresses if (!_ipAddrMgr.applyIpAssociations(network, false)) { - logger.warn("Failed to 
apply ip addresses as a part of network id {} restart", networkId); + logger.warn("Failed to apply ip addresses as a part of network {} restart", network); success = false; } // apply BGP settings if (!bgpService.applyBgpPeers(network, false)) { - logger.warn("Failed to apply bpg peers as a part of network id {} restart", networkId); + logger.warn("Failed to apply bgp peers as a part of network {} restart", network); success = false; } // apply static nat - if (!_rulesMgr.applyStaticNatsForNetwork(networkId, false, caller)) { - logger.warn("Failed to apply static nats a part of network id {} restart", networkId); + if (!_rulesMgr.applyStaticNatsForNetwork(network, false, caller)) { + logger.warn("Failed to apply static nats as a part of network {} restart", network); success = false; } // apply firewall rules final List firewallIngressRulesToApply = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Ingress); if (!_firewallMgr.applyFirewallRules(firewallIngressRulesToApply, false, caller)) { - logger.warn("Failed to reapply Ingress firewall rule(s) as a part of network id={} restart", networkId); + logger.warn("Failed to reapply Ingress firewall rule(s) as a part of network {} restart", network); success = false; } // apply port forwarding rules if (!_rulesMgr.applyPortForwardingRulesForNetwork(networkId, false, caller)) { - logger.warn("Failed to reapply port forwarding rule(s) as a part of network id={} restart", networkId); + logger.warn("Failed to reapply port forwarding rule(s) as a part of network {} restart", network); success = false; } // apply static nat rules if (!_rulesMgr.applyStaticNatRulesForNetwork(networkId, false, caller)) { - logger.warn("Failed to reapply static nat rule(s) as a part of network id={} restart", networkId); + logger.warn("Failed to reapply static nat rule(s) as a part of network {} restart", network); success = false; } // apply public load balancer rules - if 
(!_lbMgr.applyLoadBalancersForNetwork(networkId, Scheme.Public)) { - logger.warn("Failed to reapply Public load balancer rules as a part of network id={} restart", networkId); + if (!_lbMgr.applyLoadBalancersForNetwork(network, Scheme.Public)) { + logger.warn("Failed to reapply Public load balancer rules as a part of network {} restart", network); success = false; } // apply internal load balancer rules - if (!_lbMgr.applyLoadBalancersForNetwork(networkId, Scheme.Internal)) { - logger.warn("Failed to reapply internal load balancer rules as a part of network id={} restart", networkId); + if (!_lbMgr.applyLoadBalancersForNetwork(network, Scheme.Internal)) { + logger.warn("Failed to reapply internal load balancer rules as a part of network {} restart", network); success = false; } @@ -1803,7 +1803,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra for (final RemoteAccessVpn vpn : vpnsToReapply) { // Start remote access vpn per ip if (_vpnMgr.startRemoteAccessVpn(vpn.getServerAddressId(), false) == null) { - logger.warn("Failed to reapply vpn rules as a part of network id={} restart", networkId); + logger.warn("Failed to reapply vpn rules as a part of network {} restart", network); success = false; } } @@ -1811,7 +1811,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra //apply network ACLs if (!_networkACLMgr.applyACLToNetwork(networkId)) { - logger.warn("Failed to reapply network ACLs as a part of of network id={}", networkId); + logger.warn("Failed to reapply network ACLs as a part of network {}", network); success = false; } @@ -1922,13 +1922,13 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra long userId = User.UID_SYSTEM; //remove all PF/Static Nat rules for the network logger.info("Services: {} are no longer supported in network: {} after applying new network offering: {} removing the related configuration", - services, network.getUuid(), 
network.getNetworkOfferingId()); + services::toString, network::toString, () -> _networkOfferingDao.findById(network.getNetworkOfferingId())); if (services.contains(Service.StaticNat.getName()) || services.contains(Service.PortForwarding.getName())) { try { if (_rulesMgr.revokeAllPFStaticNatRulesForNetwork(networkId, userId, caller)) { - logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id={}", networkId); + logger.debug("Successfully cleaned up portForwarding/staticNat rules for network {}", network); } else { - logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup", networkId); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network {} cleanup", network); } if (services.contains(Service.StaticNat.getName())) { //removing static nat configured on ips. @@ -1947,7 +1947,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra }); } } catch (ResourceUnavailableException ex) { - logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network {} cleanup due to resourceUnavailable", network, ex); } } if (services.contains(Service.SourceNat.getName())) { @@ -1966,22 +1966,22 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra if (services.contains(Service.Lb.getName())) { //remove all LB rules for the network if (_lbMgr.removeAllLoadBalanacersForNetwork(networkId, caller, userId)) { - logger.debug("Successfully cleaned up load balancing rules for network id={}", networkId); + logger.debug("Successfully cleaned up load balancing rules for network {}", network); } else { - logger.warn("Failed to cleanup LB rules as a part of network id={} cleanup", networkId); + logger.warn("Failed to cleanup LB rules as a part of network {} cleanup", network); } } if 
(services.contains(Service.Firewall.getName())) { //revoke all firewall rules for the network try { - if (_firewallMgr.revokeAllFirewallRulesForNetwork(networkId, userId, caller)) { - logger.debug("Successfully cleaned up firewallRules rules for network id={}", networkId); + if (_firewallMgr.revokeAllFirewallRulesForNetwork(network, userId, caller)) { + logger.debug("Successfully cleaned up firewall rules for network {}", network); } else { - logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup", networkId); + logger.warn("Failed to cleanup Firewall rules as a part of network {} cleanup", network); } } catch (ResourceUnavailableException ex) { - logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex); + logger.warn("Failed to cleanup Firewall rules as a part of network {} cleanup due to resourceUnavailable", network, ex); } } @@ -1991,7 +1991,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra try { _vpnMgr.destroyRemoteAccessVpnForIp(vpn.getServerAddressId(), caller, true); } catch (ResourceUnavailableException ex) { - logger.warn("Failed to cleanup remote access vpn resources of network: {} due to Exception: {}", network.getUuid(), ex); + logger.warn("Failed to cleanup remote access vpn resources of network: {} due to Exception: {}", network, ex); } } } @@ -2088,20 +2088,20 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } @DB - protected void updateNic(final NicVO nic, final long networkId, final int count) { + protected void updateNic(final NicVO nic, final Network network, final int count) { Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) { _nicDao.update(nic.getId(), nic); if (nic.getVmType() == VirtualMachine.Type.User) { - logger.debug("Changing active number of nics for network id={} on {}", 
networkId, count); - _networksDao.changeActiveNicsBy(networkId, count); + logger.debug(String.format("Changing active number of nics for network %s on %d", network, count)); + _networksDao.changeActiveNicsBy(network.getId(), count); } if (nic.getVmType() == VirtualMachine.Type.User - || nic.getVmType() == VirtualMachine.Type.DomainRouter && _networksDao.findById(networkId).getTrafficType() == TrafficType.Guest) { - _networksDao.setCheckForGc(networkId); + || nic.getVmType() == VirtualMachine.Type.DomainRouter && _networksDao.findById(network.getId()).getTrafficType() == TrafficType.Guest) { + _networksDao.setCheckForGc(network.getId()); } } }); @@ -2128,8 +2128,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra for (final NicVO nic : nics) { final Pair implemented = implementNetwork(nic.getNetworkId(), dest, context, vmProfile.getVirtualMachine().getType() == Type.DomainRouter); if (implemented == null || implemented.first() == null) { - logger.warn("Failed to implement network id={} as a part of preparing nic id={}", nic.getNetworkId(), nic.getId()); - throw new CloudRuntimeException("Failed to implement network id=" + nic.getNetworkId() + " as a part preparing nic id=" + nic.getId()); + NetworkVO network = _networksDao.findById(nic.getNetworkId()); + logger.warn("Failed to implement network: {} as a part of preparing nic {}", network, nic); + throw new CloudRuntimeException(String.format("Failed to implement network %s as a part of preparing nic %s", network, nic)); } final NetworkVO network = implemented.second(); @@ -2194,7 +2195,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra Pair networks = getGuestNetworkRouterAndVpcDetails(vmProfile.getId()); setMtuDetailsInVRNic(networks, network, nic); } - updateNic(nic, network.getId(), 1); + updateNic(nic, network, 1); final List providersToImplement = getNetworkProviders(network.getId()); for (final NetworkElement element : networkElements) { 
@@ -2299,7 +2300,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra for (final NetworkElement element : networkElements) { if (providersToImplement.contains(element.getProvider())) { if (!_networkModel.isProviderEnabledInPhysicalNetwork(_networkModel.getPhysicalNetworkId(network), element.getProvider().getName())) { - throw new CloudRuntimeException("Service provider " + element.getProvider().getName() + " either doesn't exist or is not enabled in physical network id: " + network.getPhysicalNetworkId()); + throw new CloudRuntimeException(String.format("Service provider %s either doesn't exist or is not enabled in physical network: %s", element.getProvider().getName(), _physicalNetworkDao.findById(network.getPhysicalNetworkId()))); } if (element instanceof NetworkMigrationResponder) { if (!((NetworkMigrationResponder) element).prepareMigration(profile, network, vm, dest, context)) { @@ -2324,10 +2325,10 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra if (nic == null && !addedURIs.contains(broadcastUri.toString())) { //Nic details are not available in DB //Create nic profile for migration - logger.debug("Creating nic profile for migration. BroadcastUri: {} NetworkId: {} VM: {}", broadcastUri.toString(), ntwkId, vm.getId()); final NetworkVO network = _networksDao.findById(ntwkId); final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, network.getGuruName()); final NicProfile profile = new NicProfile(); + logger.debug("Creating nic profile for migration. 
BroadcastUri: {} NetworkId: {} VM: {}", broadcastUri.toString(), network, vm); profile.setDeviceId(255); //dummyId profile.setIPv4Address(userIp.getAddress().toString()); profile.setIPv4Netmask(publicIp.getNetmask()); @@ -2467,7 +2468,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra applyProfileToNicForRelease(nic, profile); nic.setState(Nic.State.Allocated); if (originalState == Nic.State.Reserved) { - updateNic(nic, network.getId(), -1); + updateNic(nic, network, -1); } else { _nicDao.update(nic.getId(), nic); } @@ -2476,7 +2477,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra return new Pair<>(network, profile); } else { nic.setState(Nic.State.Allocated); - updateNic(nic, network.getId(), -1); + updateNic(nic, network, -1); } } @@ -2513,7 +2514,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra @Override public void cleanupNics(final VirtualMachineProfile vm) { - logger.debug("Cleaning network for vm: {}", vm.getId()); + logger.debug("Cleaning network for vm: {}", vm); final List nics = _nicDao.listByVmId(vm.getId()); for (final NicVO nic : nics) { @@ -2610,7 +2611,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra _nicDao.remove(nic.getId()); } - logger.debug("Removed nic id={}", nic.getId()); + logger.debug("Removed nic {}", nic); // release assigned IPv6 for Isolated Network VR NIC if (Type.User.equals(vm.getType()) && GuestType.Isolated.equals(network.getGuestType()) @@ -2623,7 +2624,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra //remove the secondary ip addresses corresponding to this nic if (!removeVmSecondaryIpsOfNic(nic.getId())) { - logger.debug("Removing nic {} secondary ip addresses failed", nic.getId()); + logger.debug("Removing nic {} secondary ip addresses failed", nic); } } @@ -2837,16 +2838,21 @@ public class NetworkOrchestrator extends ManagerBase implements 
NetworkOrchestra } if (secondaryUri != null && !(bypassVlanOverlapCheck && ntwkOff.getGuestType() == GuestType.Shared) && _dcDao.findVnet(zoneId, pNtwk.getId(), BroadcastDomainType.getValue(secondaryUri)).size() > 0) { - throw new InvalidParameterValueException("The VLAN tag for isolated PVLAN " + isolatedPvlan + " is already being used for dynamic vlan allocation for the guest network in zone " - + zone.getName()); + throw new InvalidParameterValueException(String.format( + "The VLAN tag for isolated PVLAN %s is already being used for dynamic vlan allocation for the guest network in zone %s", + isolatedPvlan, zone)); } if (!UuidUtils.isUuid(vlanId)) { // For Isolated and L2 networks, don't allow to create network with vlan that already exists in the zone if (!hasGuestBypassVlanOverlapCheck(bypassVlanOverlapCheck, ntwkOff, isPrivateNetwork)) { if (_networksDao.listByZoneAndUriAndGuestType(zoneId, uri.toString(), null).size() > 0) { - throw new InvalidParameterValueException("Network with vlan " + vlanId + " already exists or overlaps with other network vlans in zone " + zoneId); + throw new InvalidParameterValueException(String.format( + "Network with vlan %s already exists or overlaps with other network vlans in zone %s", + vlanId, zone)); } else if (secondaryUri != null && _networksDao.listByZoneAndUriAndGuestType(zoneId, secondaryUri.toString(), null).size() > 0) { - throw new InvalidParameterValueException("Network with vlan " + isolatedPvlan + " already exists or overlaps with other network vlans in zone " + zoneId); + throw new InvalidParameterValueException(String.format( + "Network with vlan %s already exists or overlaps with other network vlans in zone %s", + isolatedPvlan, zone)); } else { final List dcVnets = _datacenterVnetDao.findVnet(zoneId, BroadcastDomainType.getValue(uri)); //for the network that is created as part of private gateway, @@ -2878,7 +2884,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra // don't 
allow to creating shared network with given Vlan ID, if there already exists a isolated network or // shared network with same Vlan ID in the zone if (!bypassVlanOverlapCheck && _networksDao.listByZoneAndUriAndGuestType(zoneId, uri.toString(), GuestType.Isolated).size() > 0) { - throw new InvalidParameterValueException("There is an existing isolated/shared network that overlaps with vlan id:" + vlanId + " in zone " + zoneId); + throw new InvalidParameterValueException(String.format( + "There is an existing isolated/shared network that overlaps with vlan id:%s in zone %s", vlanId, zone)); } } } @@ -2893,7 +2900,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra if (isUpdateDnsSupported == null || !Boolean.valueOf(isUpdateDnsSupported)) { if (networkDomain != null) { // TBD: NetworkOfferingId and zoneId. Send uuids instead. - throw new InvalidParameterValueException("Domain name change is not supported by network offering id=" + networkOfferingId + " in zone id=" + zoneId); + throw new InvalidParameterValueException(String.format( + "Domain name change is not supported by network offering id=%d in zone %s", + networkOfferingId, zone)); } } else { if (networkDomain == null) { @@ -3028,8 +3037,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } if (_networksDao.listByPhysicalNetworkPvlan(physicalNetworkId, uri.toString()).size() > 0) { - throw new InvalidParameterValueException("Network with vlan " + vlanIdFinal + - " already exists or overlaps with other network pvlans in zone " + zoneId); + throw new InvalidParameterValueException(String.format( + "Network with vlan %s already exists or overlaps with other network pvlans in zone %s", + vlanIdFinal, zone)); } userNetwork.setBroadcastUri(uri); @@ -3044,9 +3054,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } URI uri = NetUtils.generateUriForPvlan(vlanIdFinal, isolatedPvlan, isolatedPvlanType.toString()); if 
(_networksDao.listByPhysicalNetworkPvlan(physicalNetworkId, uri.toString(), isolatedPvlanType).size() > 0) { - throw new InvalidParameterValueException("Network with primary vlan " + vlanIdFinal + - " and secondary vlan " + isolatedPvlan + " type " + isolatedPvlanType + - " already exists or overlaps with other network pvlans in zone " + zoneId); + throw new InvalidParameterValueException(String.format( + "Network with primary vlan %s and secondary vlan %s type %s already exists or overlaps with other network pvlans in zone %s", + vlanIdFinal, isolatedPvlan, isolatedPvlanType, zone)); } userNetwork.setBroadcastUri(uri); userNetwork.setBroadcastDomainType(BroadcastDomainType.Pvlan); @@ -3189,7 +3199,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra boolean result = false; if (success) { - logger.debug("Network id={} is shutdown successfully, cleaning up corresponding resources now.", networkId); + logger.debug("Network {} is shutdown successfully, cleaning up corresponding resources now.", networkFinal); final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, networkFinal.getGuruName()); final NetworkProfile profile = convertNetworkToNetworkProfile(networkFinal.getId()); guru.shutdown(profile, _networkOfferingDao.findById(networkFinal.getNetworkOfferingId())); @@ -3250,14 +3260,14 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } } if (cleanupNeeded) { - cleanupResult = shutdownNetworkResources(network.getId(), context.getAccount(), context.getCaller().getId()); + cleanupResult = shutdownNetworkResources(network, context.getAccount(), context.getCaller().getId()); } } catch (final Exception ex) { logger.warn("shutdownNetworkRules failed during the network {} shutdown due to", network, ex); } finally { // just warn the administrator that the network elements failed to shutdown if (!cleanupResult) { - logger.warn("Failed to cleanup network id={} resources as a part of shutdownNetwork", 
network.getId()); + logger.warn("Failed to cleanup network {} resources as a part of shutdownNetwork", network); } } @@ -3299,15 +3309,15 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra CleanupPersistentNetworkResourceCommand cmd = new CleanupPersistentNetworkResourceCommand(to); CleanupPersistentNetworkResourceAnswer answer = (CleanupPersistentNetworkResourceAnswer) _agentMgr.send(host.getId(), cmd); if (answer == null) { - logger.warn("Unable to get an answer to the CleanupPersistentNetworkResourceCommand from agent: {}", host.getId()); + logger.warn("Unable to get an answer to the CleanupPersistentNetworkResourceCommand from agent: {}", host); continue; } if (!answer.getResult()) { - logger.warn("Unable to setup agent {} due to {}", host.getId(), answer.getDetails()); + logger.warn("Unable to setup agent {} due to {}", host, answer.getDetails()); } } catch (Exception e) { - logger.warn("Failed to cleanup network resources on host: {}", host.getName()); + logger.warn("Failed to cleanup network resources on host: {}", host); } } } @@ -3337,7 +3347,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra // Don't allow to delete network via api call when it has vms assigned to it final int nicCount = getActiveNicsInNetwork(networkId); if (nicCount > 0) { - logger.debug("The network id={} has active Nics, but shouldn't.", networkId); + logger.debug("The network {} has active Nics, but shouldn't.", network); // at this point we have already determined that there are no active user vms in network // if the op_networks table shows active nics, it's a bug in releasing nics updating op_networks _networksDao.changeActiveNicsBy(networkId, -1 * nicCount); @@ -3367,7 +3377,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra boolean success = true; if (!cleanupNetworkResources(networkId, callerAccount, context.getCaller().getId())) { - logger.warn("Unable to delete network 
id={}: failed to cleanup network resources", networkId); + logger.warn("Unable to delete network {}: failed to cleanup network resources", network); return false; } @@ -3396,7 +3406,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } if (success) { - logger.debug("Network id={} is destroyed successfully, cleaning up corresponding resources now.", networkId); + logger.debug("Network {} is destroyed successfully, cleaning up corresponding resources now.", network); final NetworkVO networkFinal = network; try { @@ -3495,7 +3505,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra for (final VlanVO vlan : publicVlans) { VlanVO vlanRange = _configMgr.deleteVlanAndPublicIpRange(userId, vlan.getId(), callerAccount); if (vlanRange == null) { - logger.warn("Failed to delete vlan " + vlan.getId() + ");"); + logger.warn("Failed to delete vlan [id: {}, uuid: {}];", vlan.getId(), vlan.getUuid()); result = false; } else { deletedPublicVlanRange.add(vlanRange); @@ -3505,16 +3515,16 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra //cleanup private vlans final int privateIpAllocCount = _privateIpDao.countAllocatedByNetworkId(networkId); if (privateIpAllocCount > 0) { - logger.warn("Can't delete Private ip range for network {} as it has allocated ip addresses", networkId); + logger.warn("Can't delete Private ip range for network {} as it has allocated ip addresses", network); result = false; } else { _privateIpDao.deleteByNetworkId(networkId); - logger.debug("Deleted ip range for private network id={}", networkId); + logger.debug("Deleted ip range for private network {}", network); } // release vlans of user-shared networks without specifyvlan if (isSharedNetworkWithoutSpecifyVlan(_networkOfferingDao.findById(network.getNetworkOfferingId()))) { - logger.debug("Releasing vnet for the network id={}", network.getId()); + logger.debug("Releasing vnet for the network {}", network); 
_dcDao.releaseVnet(BroadcastDomainType.getValue(network.getBroadcastUri()), network.getDataCenterId(), network.getPhysicalNetworkId(), network.getAccountId(), network.getReservationId()); } @@ -3560,10 +3570,10 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra final Long time = _lastNetworkIdsToFree.remove(networkId); if (time == null) { - logger.debug("We found network {} to be free for the first time. Adding it to the list: {}", networkId, currentTime); + logger.debug("We found network {} to be free for the first time. Adding it to the list: {}", () -> _networksDao.findById(networkId), () -> currentTime); stillFree.put(networkId, currentTime); } else if (time > currentTime - netGcWait) { - logger.debug("Network {} is still free but it's not time to shutdown yet: {}",networkId, time); + logger.debug("Network {} is still free but it's not time to shutdown yet: {}", () -> _networksDao.findById(networkId), time::toString); stillFree.put(networkId, time); } else { shutdownList.add(networkId); @@ -3590,7 +3600,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra shutdownNetwork(networkId, context, false); } catch (final Exception e) { - logger.warn("Unable to shutdown network: {}", networkId); + logger.warn("Unable to shutdown network: {}", () -> _networksDao.findById(networkId)); } } } @@ -3630,7 +3640,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra boolean restartRequired = false; final NetworkVO network = _networksDao.findById(networkId); - logger.debug("Restarting network {}...", networkId); + logger.debug("Restarting network {}...", network); final ReservationContext context = new ReservationContextImpl(null, null, callerUser, callerAccount); final NetworkOffering offering = _networkOfferingDao.findByIdIncludingRemoved(network.getNetworkOfferingId()); @@ -3985,51 +3995,51 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra 
//remove all PF/Static Nat rules for the network try { if (_rulesMgr.revokeAllPFStaticNatRulesForNetwork(networkId, callerUserId, caller)) { - logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id={}", networkId); + logger.debug("Successfully cleaned up portForwarding/staticNat rules for network {}", network); } else { success = false; - logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup", networkId); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network {} cleanup", network); } } catch (final ResourceUnavailableException ex) { success = false; // shouldn't even come here as network is being cleaned up after all network elements are shutdown - logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network {} cleanup due to resourceUnavailable", network, ex); } //remove all LB rules for the network if (_lbMgr.removeAllLoadBalanacersForNetwork(networkId, caller, callerUserId)) { - logger.debug("Successfully cleaned up load balancing rules for network id={}", networkId); + logger.debug("Successfully cleaned up load balancing rules for network {}", network); } else { // shouldn't even come here as network is being cleaned up after all network elements are shutdown success = false; - logger.warn("Failed to cleanup LB rules as a part of network id={} cleanup", networkId); + logger.warn("Failed to cleanup LB rules as a part of network {} cleanup", network); } //revoke all firewall rules for the network try { - if (_firewallMgr.revokeAllFirewallRulesForNetwork(networkId, callerUserId, caller)) { - logger.debug("Successfully cleaned up firewallRules rules for network id={}", networkId); + if (_firewallMgr.revokeAllFirewallRulesForNetwork(network, callerUserId, caller)) { + logger.debug("Successfully cleaned 
up firewallRules rules for network {}", network); } else { success = false; - logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup", networkId); + logger.warn("Failed to cleanup Firewall rules as a part of network {} cleanup", network); } } catch (final ResourceUnavailableException ex) { success = false; // shouldn't even come here as network is being cleaned up after all network elements are shutdown - logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex); + logger.warn("Failed to cleanup Firewall rules as a part of network {} cleanup due to resourceUnavailable", network, ex); } //revoke all network ACLs for network try { if (_networkACLMgr.revokeACLItemsForNetwork(networkId)) { - logger.debug("Successfully cleaned up NetworkACLs for network id={}", networkId); + logger.debug("Successfully cleaned up NetworkACLs for network {}", network); } else { success = false; - logger.warn("Failed to cleanup NetworkACLs as a part of network id={} cleanup", networkId); + logger.warn("Failed to cleanup NetworkACLs as a part of network {} cleanup", network); } } catch (final ResourceUnavailableException ex) { success = false; - logger.warn("Failed to cleanup Network ACLs as a part of network id={} cleanup due to resourceUnavailable ", networkId, ex); + logger.warn("Failed to cleanup Network ACLs as a part of network {} cleanup due to resourceUnavailable ", network, ex); } //release all ip addresses @@ -4047,7 +4057,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra logger.debug("Portable IP address {} is no longer associated with any network", ipToRelease); } } else { - _vpcMgr.unassignIPFromVpcNetwork(ipToRelease.getId(), network.getId()); + _vpcMgr.unassignIPFromVpcNetwork(ipToRelease, network); } } @@ -4065,14 +4075,13 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra return success; } - private boolean 
shutdownNetworkResources(final long networkId, final Account caller, final long callerUserId) { + private boolean shutdownNetworkResources(final Network network, final Account caller, final long callerUserId) { // This method cleans up network rules on the backend w/o touching them in the DB boolean success = true; - final Network network = _networksDao.findById(networkId); // Mark all PF rules as revoked and apply them on the backend (not in the DB) - final List pfRules = _portForwardingRulesDao.listByNetwork(networkId); - logger.debug("Releasing {} port forwarding rules for network id={} as a part of shutdownNetworkRules.", pfRules.size(), networkId); + final List pfRules = _portForwardingRulesDao.listByNetwork(network.getId()); + logger.debug("Releasing {} port forwarding rules for network id={} as a part of shutdownNetworkRules.", pfRules.size(), network); for (final PortForwardingRuleVO pfRule : pfRules) { logger.trace("Marking pf rule {} with Revoke state", pfRule); @@ -4090,9 +4099,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } // Mark all static rules as revoked and apply them on the backend (not in the DB) - final List firewallStaticNatRules = _firewallDao.listByNetworkAndPurpose(networkId, Purpose.StaticNat); + final List firewallStaticNatRules = _firewallDao.listByNetworkAndPurpose(network.getId(), Purpose.StaticNat); final List staticNatRules = new ArrayList(); - logger.debug("Releasing {} static nat rules for network id={} as a part of shutdownNetworkRules", firewallStaticNatRules.size(), networkId); + logger.debug("Releasing {} static nat rules for network {} as a part of shutdownNetworkRules", firewallStaticNatRules.size(), network); for (final FirewallRuleVO firewallStaticNatRule : firewallStaticNatRules) { logger.trace("Marking static nat rule {} with Revoke state", firewallStaticNatRule); @@ -4100,7 +4109,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra final 
FirewallRuleVO ruleVO = _firewallDao.findById(firewallStaticNatRule.getId()); if (ip == null || !ip.isOneToOneNat() || ip.getAssociatedWithVmId() == null) { - throw new InvalidParameterValueException("Source ip address of the rule id=" + firewallStaticNatRule.getId() + " is not static nat enabled"); + throw new InvalidParameterValueException(String.format("Source ip address of the rule %s is not static nat enabled", firewallStaticNatRule)); } //String dstIp = _networkModel.getIpInNetwork(ip.getAssociatedWithVmId(), firewallStaticNatRule.getNetworkId()); @@ -4119,7 +4128,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } try { - if (!_lbMgr.revokeLoadBalancersForNetwork(networkId, Scheme.Public)) { + if (!_lbMgr.revokeLoadBalancersForNetwork(network, Scheme.Public)) { logger.warn("Failed to cleanup public lb rules as a part of shutdownNetworkRules"); success = false; } @@ -4129,7 +4138,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } try { - if (!_lbMgr.revokeLoadBalancersForNetwork(networkId, Scheme.Internal)) { + if (!_lbMgr.revokeLoadBalancersForNetwork(network, Scheme.Internal)) { logger.warn("Failed to cleanup internal lb rules as a part of shutdownNetworkRules"); success = false; } @@ -4139,8 +4148,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } // revoke all firewall rules for the network w/o applying them on the DB - final List firewallRules = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Ingress); - logger.debug("Releasing firewall ingress rules for network id={} as a part of shutdownNetworkRules", firewallRules.size(), networkId); + final List firewallRules = _firewallDao.listByNetworkPurposeTrafficType(network.getId(), Purpose.Firewall, FirewallRule.TrafficType.Ingress); + logger.debug("Releasing firewall ingress rules for network {} as a part of shutdownNetworkRules", 
firewallRules.size(), network); for (final FirewallRuleVO firewallRule : firewallRules) { logger.trace("Marking firewall ingress rule {} with Revoke state", firewallRule); @@ -4157,8 +4166,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra success = false; } - final List firewallEgressRules = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Egress); - logger.debug("Releasing {} firewall egress rules for network id={} as a part of shutdownNetworkRules", firewallEgressRules.size(), networkId); + final List firewallEgressRules = _firewallDao.listByNetworkPurposeTrafficType(network.getId(), Purpose.Firewall, FirewallRule.TrafficType.Egress); + logger.debug("Releasing {} firewall egress rules for network {} as a part of shutdownNetworkRules", firewallEgressRules.size(), network); try { // delete default egress rule @@ -4166,7 +4175,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra if (_networkModel.areServicesSupportedInNetwork(network.getId(), Service.Firewall) && (network.getGuestType() == Network.GuestType.Isolated || network.getGuestType() == Network.GuestType.Shared && zone.getNetworkType() == NetworkType.Advanced)) { // add default egress rule to accept the traffic - _firewallMgr.applyDefaultEgressFirewallRule(network.getId(), _networkModel.getNetworkEgressDefaultPolicy(networkId), false); + _firewallMgr.applyDefaultEgressFirewallRule(network.getId(), _networkModel.getNetworkEgressDefaultPolicy(network.getId()), false); } } catch (final ResourceUnavailableException ex) { @@ -4190,11 +4199,11 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } if (network.getVpcId() != null) { - logger.debug("Releasing Network ACL Items for network id={} as a part of shutdownNetworkRules", networkId); + logger.debug("Releasing Network ACL Items for network {} as a part of shutdownNetworkRules", network); try { //revoke all 
Network ACLs for the network w/o applying them in the DB - if (!_networkACLMgr.revokeACLItemsForNetwork(networkId)) { + if (!_networkACLMgr.revokeACLItemsForNetwork(network.getId())) { logger.warn("Failed to cleanup network ACLs as a part of shutdownNetworkRules"); success = false; } @@ -4206,13 +4215,13 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } //release all static nats for the network - if (!_rulesMgr.applyStaticNatForNetwork(networkId, false, caller, true)) { - logger.warn("Failed to disable static nats as part of shutdownNetworkRules for network id {}", networkId); + if (!_rulesMgr.applyStaticNatForNetwork(network, false, caller, true)) { + logger.warn("Failed to disable static nats as part of shutdownNetworkRules for network {}", network); success = false; } // Get all ip addresses, mark as releasing and release them on the backend - final List userIps = _ipAddressDao.listByAssociatedNetwork(networkId, null); + final List userIps = _ipAddressDao.listByAssociatedNetwork(network.getId(), null); final List publicIpsToRelease = new ArrayList(); if (userIps != null && !userIps.isEmpty()) { for (final IPAddressVO userIp : userIps) { @@ -4310,12 +4319,12 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra final CheckNetworkAnswer answer = (CheckNetworkAnswer) _agentMgr.easySend(hostId, nwCmd); if (answer == null) { - logger.warn("Unable to get an answer to the CheckNetworkCommand from agent: {}", host.getId()); - throw new ConnectionException(true, "Unable to get an answer to the CheckNetworkCommand from agent: " + host.getId()); + logger.warn("Unable to get an answer to the CheckNetworkCommand from agent: {}", host); + throw new ConnectionException(true, String.format("Unable to get an answer to the CheckNetworkCommand from agent: %s", host)); } if (!answer.getResult()) { - logger.warn("Unable to setup agent {} due to {}", hostId, answer.getDetails()); + logger.warn("Unable to setup agent {} 
due to {}", host, answer.getDetails()); final String msg = "Incorrect Network setup on agent, Reinitialize agent after network names are setup, details : " + answer.getDetails(); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, dcId, host.getPodId(), msg, msg); throw new ConnectionException(true, msg); @@ -4471,8 +4480,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra if (prepare) { final Pair implemented = implementNetwork(nic.getNetworkId(), dest, context, vmProfile.getVirtualMachine().getType() == Type.DomainRouter); if (implemented == null || implemented.first() == null) { - logger.warn("Failed to implement network id={} as a part of preparing nic id={}", nic.getNetworkId(), nic.getId()); - throw new CloudRuntimeException("Failed to implement network id=" + nic.getNetworkId() + " as a part preparing nic id=" + nic.getId()); + logger.warn("Failed to implement network {} as a part of preparing nic {}", network, nic); + throw new CloudRuntimeException(String.format("Failed to implement network %s as a part preparing nic %s", network, nic)); } nic = prepareNic(vmProfile, dest, context, nic.getId(), implemented.second()); logger.debug("Nic is prepared successfully for vm {} in network {}", vm, network); @@ -4588,18 +4597,18 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra final List providers = getProvidersForServiceInNetwork(network, service); //Only support one provider now if (providers == null) { - logger.error("Cannot find {} provider for network {}", service.getName(), network.getId()); + logger.error("Cannot find {} provider for network {}", service.getName(), network); return null; } if (providers.size() != 1 && service != Service.Lb) { //support more than one LB providers only - logger.error("Found {} {} providers for network! {}", providers.size(), service.getName(), network.getId()); + logger.error("Found {} {} providers for network! 
{}", providers.size(), service.getName(), network); return null; } for (final Provider provider : providers) { final NetworkElement element = _networkModel.getElementImplementingProvider(provider.getName()); - logger.info("Let {} handle {} in network {}", element.getName(), service.getName(), network.getId()); + logger.info("Let {} handle {} in network {}", element.getName(), service.getName(), network); elements.add(element); } return elements; @@ -4693,7 +4702,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra @Override public Pair importNic(final String macAddress, int deviceId, final Network network, final Boolean isDefaultNic, final VirtualMachine vm, final Network.IpAddresses ipAddresses, final DataCenter dataCenter, final boolean forced) throws ConcurrentOperationException, InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException { - logger.debug("Allocating nic for vm {} in network {} during import", vm.getUuid(), network); + logger.debug("Allocating nic for vm {} in network {} during import", vm, network); String selectedIp = null; if (ipAddresses != null && StringUtils.isNotEmpty(ipAddresses.getIp4Address())) { if (ipAddresses.getIp4Address().equals("auto")) { @@ -4743,7 +4752,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra int count = 1; if (vo.getVmType() == VirtualMachine.Type.User) { - logger.debug("Changing active number of nics for network id={} on {}", network.getUuid(), count); + logger.debug("Changing active number of nics for network {} on {}", network, count); _networksDao.changeActiveNicsBy(network.getId(), count); } if (vo.getVmType() == VirtualMachine.Type.User @@ -4807,16 +4816,16 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra private String generateNewMacAddressIfForced(Network network, String macAddress, boolean forced) { if (!forced) { - throw new CloudRuntimeException("NIC with MAC address " + macAddress 
+ " exists on network with ID " + network.getUuid() + + throw new CloudRuntimeException("NIC with MAC address " + macAddress + " exists on network " + network + " and forced flag is disabled"); } try { - logger.debug("Generating a new mac address on network {} as the mac address {} already exists", network.getName(), macAddress); + logger.debug("Generating a new mac address on network {} as the mac address {} already exists", network, macAddress); String newMacAddress = _networkModel.getNextAvailableMacAddressInNetwork(network.getId()); logger.debug("Successfully generated the mac address {}, using it instead of the conflicting address {}", newMacAddress, macAddress); return newMacAddress; } catch (InsufficientAddressCapacityException e) { - String msg = String.format("Could not generate a new mac address on network %s", network.getName()); + String msg = String.format("Could not generate a new mac address on network %s", network); logger.error(msg); throw new CloudRuntimeException(msg); } @@ -4824,7 +4833,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra @Override public void unmanageNics(VirtualMachineProfile vm) { - logger.debug("Unmanaging NICs for VM: {}", vm.getId()); + logger.debug("Unmanaging NICs for VM: {}", vm); VirtualMachine virtualMachine = vm.getVirtualMachine(); final List nics = _nicDao.listByVmId(vm.getId()); diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index ec5d5efb5cf..0773c20b6b9 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -151,7 +151,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra files = 
migrationHelper.getSortedValidSourcesList(srcDatastore, snapshotChains, childTemplates); if (files.isEmpty()) { - return new MigrationResponse(String.format("No files in Image store: %s to migrate", srcDatastore.getId()), migrationPolicy.toString(), true); + return new MigrationResponse(String.format("No files in Image store: %s to migrate", srcDatastore), migrationPolicy.toString(), true); } Map> storageCapacities = new Hashtable<>(); for (Long storeId : destDatastores) { @@ -159,7 +159,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra } storageCapacities.put(srcDataStoreId, new Pair<>(null, null)); if (migrationPolicy == MigrationPolicy.COMPLETE) { - logger.debug("Setting source image store: {} to read-only", srcDatastore.getId()); + logger.debug("Setting source image store: {} to read-only", srcDatastore); storageService.updateImageStoreStatus(srcDataStoreId, true); } @@ -309,8 +309,9 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra message += "Image stores have been attempted to be balanced"; success = true; } else { - message = "Files not completely migrated from "+ srcDatastore.getId() + ". Datastore (source): " + srcDatastore.getId() + "has equal or more free space than destination."+ - " If you want to continue using the Image Store, please change the read-only status using 'update imagestore' command"; + message = String.format("Files not completely migrated from %s. Source datastore " + + "has equal or more free space than destination. 
If you want to continue using the Image Store, " + + "please change the read-only status using 'update imagestore' command", srcDatastore); success = false; } } else { @@ -353,7 +354,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra task.setTemplateChain(templateChains); } futures.add((executor.submit(task))); - logger.debug(String.format("Migration of {}: {} is initiated.", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid())); + logger.debug("Migration of {}: {} is initiated.", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid()); return storageCapacities; } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index 36e28145949..06061908888 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -885,7 +885,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati Account owner, long deviceId, String configurationId) { assert (template.getFormat() != ImageFormat.ISO) : "ISO is not a template."; - Long size = _tmpltMgr.getTemplateSize(template.getId(), vm.getDataCenterId()); + Long size = _tmpltMgr.getTemplateSize(template, vm.getDataCenterId()); if (rootDisksize != null) { if (template.isDeployAsIs()) { // Volume size specified from template deploy-as-is @@ -994,7 +994,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati if (configurationDetail != null) { configurationId = configurationDetail.getValue(); } - templateAsIsDisks = _tmpltMgr.getTemplateDisksOnImageStore(template.getId(), DataStoreRole.Image, configurationId); + templateAsIsDisks = _tmpltMgr.getTemplateDisksOnImageStore(template, 
DataStoreRole.Image, configurationId); if (CollectionUtils.isNotEmpty(templateAsIsDisks)) { templateAsIsDisks = templateAsIsDisks.stream() .filter(x -> !x.isIso()) diff --git a/engine/orchestration/src/test/java/com/cloud/agent/manager/AgentManagerImplTest.java b/engine/orchestration/src/test/java/com/cloud/agent/manager/AgentManagerImplTest.java index 376e189d875..452cfd90056 100644 --- a/engine/orchestration/src/test/java/com/cloud/agent/manager/AgentManagerImplTest.java +++ b/engine/orchestration/src/test/java/com/cloud/agent/manager/AgentManagerImplTest.java @@ -47,7 +47,7 @@ public class AgentManagerImplTest { host = new HostVO("some-Uuid"); host.setDataCenterId(1L); cmds = new StartupCommand[]{new StartupRoutingCommand()}; - attache = new ConnectedAgentAttache(null, 1L, "kvm-attache", null, false); + attache = new ConnectedAgentAttache(null, 1L, "uuid", "kvm-attache", null, false); hostDao = Mockito.mock(HostDao.class); storagePoolMonitor = Mockito.mock(Listener.class); diff --git a/engine/orchestration/src/test/java/com/cloud/agent/manager/ConnectedAgentAttacheTest.java b/engine/orchestration/src/test/java/com/cloud/agent/manager/ConnectedAgentAttacheTest.java index 3fa6d8d9729..0b42b505668 100644 --- a/engine/orchestration/src/test/java/com/cloud/agent/manager/ConnectedAgentAttacheTest.java +++ b/engine/orchestration/src/test/java/com/cloud/agent/manager/ConnectedAgentAttacheTest.java @@ -31,8 +31,8 @@ public class ConnectedAgentAttacheTest { Link link = mock(Link.class); - ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, null, link, false); - ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 0, null, link, false); + ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, "uuid", null, link, false); + ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 0, "uuid", null, link, false); assertTrue(agentAttache1.equals(agentAttache2)); } @@ -42,7 +42,7 @@ public class 
ConnectedAgentAttacheTest { Link link = mock(Link.class); - ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, null, link, false); + ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, "uuid", null, link, false); assertFalse(agentAttache1.equals(null)); } @@ -53,8 +53,8 @@ public class ConnectedAgentAttacheTest { Link link1 = mock(Link.class); Link link2 = mock(Link.class); - ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, null, link1, false); - ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 0, null, link2, false); + ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, "uuid", null, link1, false); + ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 0, "uuid", null, link2, false); assertFalse(agentAttache1.equals(agentAttache2)); } @@ -64,8 +64,8 @@ public class ConnectedAgentAttacheTest { Link link1 = mock(Link.class); - ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 1, null, link1, false); - ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 2, null, link1, false); + ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 1, "uuid", null, link1, false); + ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 2, "uuid", null, link1, false); assertFalse(agentAttache1.equals(agentAttache2)); } @@ -75,7 +75,7 @@ public class ConnectedAgentAttacheTest { Link link1 = mock(Link.class); - ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 1, null, link1, false); + ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 1, "uuid", null, link1, false); assertFalse(agentAttache1.equals("abc")); } diff --git a/engine/orchestration/src/test/java/com/cloud/agent/manager/DirectAgentAttacheTest.java b/engine/orchestration/src/test/java/com/cloud/agent/manager/DirectAgentAttacheTest.java index fe9b7fafa81..65e31c271a4 100644 --- 
a/engine/orchestration/src/test/java/com/cloud/agent/manager/DirectAgentAttacheTest.java +++ b/engine/orchestration/src/test/java/com/cloud/agent/manager/DirectAgentAttacheTest.java @@ -26,6 +26,8 @@ import org.mockito.junit.MockitoJUnitRunner; import com.cloud.resource.ServerResource; +import java.util.UUID; + @RunWith(MockitoJUnitRunner.class) public class DirectAgentAttacheTest { @Mock @@ -36,9 +38,11 @@ public class DirectAgentAttacheTest { long _id = 0L; + String _uuid = UUID.randomUUID().toString(); + @Before public void setup() { - directAgentAttache = new DirectAgentAttache(_agentMgr, _id, "myDirectAgentAttache", _resource, false); + directAgentAttache = new DirectAgentAttache(_agentMgr, _id, _uuid, "myDirectAgentAttache", _resource, false); MockitoAnnotations.initMocks(directAgentAttache); } diff --git a/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java b/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java index d9971815f5e..fdda38fbc39 100644 --- a/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java +++ b/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java @@ -31,6 +31,7 @@ import javax.persistence.Table; import com.cloud.org.Grouping; import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "host_pod_ref") @@ -197,4 +198,11 @@ public class HostPodVO implements Pod { public void setUuid(String uuid) { this.uuid = uuid; } + + @Override + public String toString() { + return String.format("HostPod %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } } diff --git a/engine/schema/src/main/java/com/cloud/dc/VlanVO.java b/engine/schema/src/main/java/com/cloud/dc/VlanVO.java index 7423ded598f..c271325f3de 100644 --- a/engine/schema/src/main/java/com/cloud/dc/VlanVO.java +++ b/engine/schema/src/main/java/com/cloud/dc/VlanVO.java @@ -29,6 +29,7 @@ import 
javax.persistence.Id; import javax.persistence.Table; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "vlan") @@ -192,24 +193,11 @@ public class VlanVO implements Vlan { @Override public String toString() { if (toString == null) { - toString = - new StringBuilder("Vlan[").append(vlanTag) - .append("|") - .append(vlanGateway) - .append("|") - .append(vlanNetmask) - .append("|") - .append(ip6Gateway) - .append("|") - .append(ip6Cidr) - .append("|") - .append(ipRange) - .append("|") - .append(ip6Range) - .append("|") - .append(networkId) - .append("]") - .toString(); + toString = String.format("Vlan %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", + "vlanTag", "vlanGateway", "vlanNetmask", "ip6Gateway", "ip6Cidr", + "ipRange", "ip6Range", "networkId")); + } return toString; } diff --git a/engine/schema/src/main/java/com/cloud/dc/VmwareDatacenterVO.java b/engine/schema/src/main/java/com/cloud/dc/VmwareDatacenterVO.java index 6390d923ed8..5a4a71f82e7 100644 --- a/engine/schema/src/main/java/com/cloud/dc/VmwareDatacenterVO.java +++ b/engine/schema/src/main/java/com/cloud/dc/VmwareDatacenterVO.java @@ -28,6 +28,7 @@ import javax.persistence.Table; import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.Encrypt; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; /** * VmwareDatacenterVO contains information of Vmware Datacenter associated with a CloudStack zone. 
@@ -125,7 +126,9 @@ public class VmwareDatacenterVO implements VmwareDatacenter { @Override public String toString() { - return new StringBuilder("VmwareDatacenter[").append(guid).append("]").toString(); + return String.format("VmwareDatacenter %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "guid")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/domain/DomainVO.java b/engine/schema/src/main/java/com/cloud/domain/DomainVO.java index 4c36a3401ca..c950fa31c88 100644 --- a/engine/schema/src/main/java/com/cloud/domain/DomainVO.java +++ b/engine/schema/src/main/java/com/cloud/domain/DomainVO.java @@ -26,6 +26,7 @@ import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.Table; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -206,7 +207,9 @@ public class DomainVO implements Domain { @Override public String toString() { - return new StringBuilder("Domain:").append(id).append(path).toString(); + return String.format("Domain %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "path")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/host/HostVO.java b/engine/schema/src/main/java/com/cloud/host/HostVO.java index b5b634a73a7..a449eb450cf 100644 --- a/engine/schema/src/main/java/com/cloud/host/HostVO.java +++ b/engine/schema/src/main/java/com/cloud/host/HostVO.java @@ -712,7 +712,7 @@ public class HostVO implements Host { @Override public String toString() { - return String.format("Host %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "name", "uuid", "type")); + return String.format("Host %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "name", "type")); } public void setHypervisorType(HypervisorType hypervisorType) { diff --git 
a/engine/schema/src/main/java/com/cloud/network/LBHealthCheckPolicyVO.java b/engine/schema/src/main/java/com/cloud/network/LBHealthCheckPolicyVO.java index 22bb2c26b65..ee5f67b09cd 100644 --- a/engine/schema/src/main/java/com/cloud/network/LBHealthCheckPolicyVO.java +++ b/engine/schema/src/main/java/com/cloud/network/LBHealthCheckPolicyVO.java @@ -27,6 +27,7 @@ import javax.persistence.PrimaryKeyJoinColumn; import javax.persistence.Table; import com.cloud.network.rules.HealthCheckPolicy; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "load_balancer_healthcheck_policies") @@ -169,4 +170,11 @@ public class LBHealthCheckPolicyVO implements HealthCheckPolicy { public boolean isDisplay() { return display; } + + @Override + public String toString() { + return String.format("LBHealthCheckPolicy %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "pingPath")); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/as/AutoScalePolicyVO.java b/engine/schema/src/main/java/com/cloud/network/as/AutoScalePolicyVO.java index fa5dcafba34..24d8b8e7f40 100644 --- a/engine/schema/src/main/java/com/cloud/network/as/AutoScalePolicyVO.java +++ b/engine/schema/src/main/java/com/cloud/network/as/AutoScalePolicyVO.java @@ -33,6 +33,7 @@ import javax.persistence.TemporalType; import org.apache.cloudstack.api.InternalIdentity; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "autoscale_policies") @@ -92,7 +93,9 @@ public class AutoScalePolicyVO implements AutoScalePolicy, InternalIdentity { @Override public String toString() { - return new StringBuilder("AutoScalePolicy[").append("id-").append(id).append("]").toString(); + return String.format("AutoScalePolicy %s.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); } 
@Override diff --git a/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmGroupVO.java b/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmGroupVO.java index 652cbb340a3..307de9f1a60 100644 --- a/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmGroupVO.java +++ b/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmGroupVO.java @@ -32,6 +32,7 @@ import javax.persistence.TemporalType; import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.StringUtils; import com.cloud.utils.db.GenericDao; @@ -126,11 +127,9 @@ public class AutoScaleVmGroupVO implements AutoScaleVmGroup, InternalIdentity, I @Override public String toString() { - return new StringBuilder("AutoScaleVmGroupVO[").append("id=").append(id) - .append("|name=").append(name) - .append("|loadBalancerId=").append(loadBalancerId) - .append("|profileId=").append(profileId) - .append("]").toString(); + return String.format("AutoScaleVmGroup %s.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "loadBalancerId", "profileId")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmProfileVO.java b/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmProfileVO.java index 21291062756..562d908507e 100644 --- a/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmProfileVO.java +++ b/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmProfileVO.java @@ -37,6 +37,7 @@ import javax.persistence.Table; import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; @@ -126,7 +127,9 @@ public class 
AutoScaleVmProfileVO implements AutoScaleVmProfile, Identity, Inter @Override public String toString() { - return new StringBuilder("AutoScaleVMProfileVO[").append("id").append(id).append("-").append("templateId").append("-").append(templateId).append("]").toString(); + return String.format("AutoScaleVMProfile %s.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "templateId")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/as/ConditionVO.java b/engine/schema/src/main/java/com/cloud/network/as/ConditionVO.java index 18e67a4af61..0679dac3235 100644 --- a/engine/schema/src/main/java/com/cloud/network/as/ConditionVO.java +++ b/engine/schema/src/main/java/com/cloud/network/as/ConditionVO.java @@ -33,6 +33,7 @@ import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "conditions") @@ -91,7 +92,9 @@ public class ConditionVO implements Condition, Identity, InternalIdentity { @Override public String toString() { - return new StringBuilder("Condition[").append("id-").append(id).append("]").toString(); + return String.format("Condition %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/as/CounterVO.java b/engine/schema/src/main/java/com/cloud/network/as/CounterVO.java index e5ab9886dda..be21515bb51 100644 --- a/engine/schema/src/main/java/com/cloud/network/as/CounterVO.java +++ b/engine/schema/src/main/java/com/cloud/network/as/CounterVO.java @@ -34,6 +34,7 @@ import org.apache.cloudstack.api.InternalIdentity; import com.cloud.network.Network; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "counter") @@ 
-79,7 +80,9 @@ public class CounterVO implements Counter, Identity, InternalIdentity { @Override public String toString() { - return new StringBuilder("Counter[").append("id-").append(id).append("]").toString(); + return String.format("Counter %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/dao/ExternalLoadBalancerDeviceVO.java b/engine/schema/src/main/java/com/cloud/network/dao/ExternalLoadBalancerDeviceVO.java index 80bec1b8152..88c5c0885a8 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/ExternalLoadBalancerDeviceVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/ExternalLoadBalancerDeviceVO.java @@ -30,6 +30,7 @@ import javax.persistence.Table; import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; import org.apache.cloudstack.network.ExternalNetworkDeviceManager; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; /** * ExternalLoadBalancerDeviceVO contains information on external load balancer devices (F5/Netscaler VPX,MPX,SDX) added into a deployment @@ -244,4 +245,11 @@ public class ExternalLoadBalancerDeviceVO implements InternalIdentity, Identity public void setUuid(String uuid) { this.uuid = uuid; } + + @Override + public String toString() { + return String.format("ExternalLoadBalancerDevice %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "providerName")); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressVO.java b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressVO.java index 4c7569a55b9..88e146d2a80 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressVO.java @@ -33,6 +33,7 @@ import javax.persistence.TemporalType; import com.cloud.network.IpAddress; import 
com.cloud.utils.db.GenericDao; import com.cloud.utils.net.Ip; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; /** * A bean representing a public IP Address @@ -268,7 +269,9 @@ public class IPAddressVO implements IpAddress { @Override public String toString() { - return new StringBuilder("Ip[").append(address).append("-").append(dataCenterId).append("]").toString(); + return String.format("IPAddress %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "dataCenterId", "address")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/dao/LBStickinessPolicyVO.java b/engine/schema/src/main/java/com/cloud/network/dao/LBStickinessPolicyVO.java index e9f50a75a7b..72b8fc151b7 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/LBStickinessPolicyVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/LBStickinessPolicyVO.java @@ -33,6 +33,7 @@ import javax.persistence.Table; import com.cloud.network.rules.StickinessPolicy; import com.cloud.utils.Pair; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "load_balancer_stickiness_policies") @@ -162,4 +163,11 @@ public class LBStickinessPolicyVO implements StickinessPolicy { public boolean isDisplay() { return display; } + + @Override + public String toString() { + return String.format("LBStickinessPolicy %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "methodName")); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVO.java b/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVO.java index bd5ea95dcc7..ad0338b9849 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVO.java @@ -27,6 +27,7 @@ import javax.persistence.Table; import 
com.cloud.network.rules.FirewallRuleVO; import com.cloud.network.rules.LoadBalancer; import com.cloud.utils.net.NetUtils; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; /** * This VO represents Public Load Balancer @@ -136,4 +137,11 @@ public class LoadBalancerVO extends FirewallRuleVO implements LoadBalancer { public String getCidrList() { return cidrList; } + + @Override + public String toString() { + return String.format("LoadBalancer %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "purpose", "state")); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkServiceProviderVO.java b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkServiceProviderVO.java index 415b513b405..9557c7465bf 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkServiceProviderVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkServiceProviderVO.java @@ -35,6 +35,7 @@ import org.apache.cloudstack.api.InternalIdentity; import com.cloud.network.Network.Service; import com.cloud.network.PhysicalNetworkServiceProvider; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "physical_network_service_providers") @@ -109,6 +110,13 @@ public class PhysicalNetworkServiceProviderVO implements PhysicalNetworkServiceP this.uuid = UUID.randomUUID().toString(); } + + @Override + public String toString() { + return String.format("PhysicalNetworkServiceProvider %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "providerName")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkVO.java b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkVO.java index 52ebe7596a4..68e023984a0 
100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkVO.java @@ -37,6 +37,7 @@ import com.cloud.network.PhysicalNetwork; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; /** * NetworkConfigurationVO contains information about a specific physical network. @@ -248,4 +249,11 @@ public class PhysicalNetworkVO implements PhysicalNetwork { public String getName() { return name; } + + @Override + public String toString() { + return String.format("PhysicalNetwork %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnVO.java b/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnVO.java index 95e3693a99c..2439ea55b4a 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnVO.java @@ -18,6 +18,7 @@ package com.cloud.network.dao; import com.cloud.network.RemoteAccessVpn; import com.cloud.utils.db.Encrypt; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Entity; @@ -86,6 +87,11 @@ public class RemoteAccessVpnVO implements RemoteAccessVpn { this.vpcId = vpcId; } + @Override + public String toString() { + return String.format("RemoteAccessVpn %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid")); + } + @Override public State getState() { return state; diff --git a/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteCustomerGatewayVO.java b/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteCustomerGatewayVO.java index 52741fdd9a5..e5394238c31 
100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteCustomerGatewayVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteCustomerGatewayVO.java @@ -29,6 +29,7 @@ import javax.persistence.Table; import com.cloud.network.Site2SiteCustomerGateway; import com.cloud.utils.db.Encrypt; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @@ -110,6 +111,13 @@ public class Site2SiteCustomerGatewayVO implements Site2SiteCustomerGateway { this.ikeVersion = ikeVersion; } + @Override + public String toString() { + return String.format("Site2SiteCustomerGateway %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnConnectionVO.java b/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnConnectionVO.java index b032966dd5a..4d6bee5c861 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnConnectionVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnConnectionVO.java @@ -32,6 +32,7 @@ import org.apache.cloudstack.api.InternalIdentity; import com.cloud.network.Site2SiteVpnConnection; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @@ -182,4 +183,11 @@ public class Site2SiteVpnConnectionVO implements Site2SiteVpnConnection, Interna public String getName() { return null; } + + @Override + public String toString() { + return String.format("Site2SiteVpnConnection %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "state")); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnGatewayVO.java b/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnGatewayVO.java index 
703c78c7b86..a5eb7efce23 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnGatewayVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnGatewayVO.java @@ -28,6 +28,7 @@ import javax.persistence.Table; import com.cloud.network.Site2SiteVpnGateway; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @@ -70,6 +71,13 @@ public class Site2SiteVpnGatewayVO implements Site2SiteVpnGateway { this.domainId = domainId; } + @Override + public String toString() { + return String.format("Site2SiteVpnGateway %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/network/rules/FirewallRuleVO.java b/engine/schema/src/main/java/com/cloud/network/rules/FirewallRuleVO.java index 07b25e7a28c..1dfdc5093a5 100644 --- a/engine/schema/src/main/java/com/cloud/network/rules/FirewallRuleVO.java +++ b/engine/schema/src/main/java/com/cloud/network/rules/FirewallRuleVO.java @@ -36,6 +36,7 @@ import javax.persistence.Transient; import com.cloud.utils.db.GenericDao; import com.cloud.utils.net.NetUtils; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "firewall_rules") @@ -258,7 +259,9 @@ public class FirewallRuleVO implements FirewallRule { @Override public String toString() { - return new StringBuilder("Rule[").append(id).append("-").append(purpose).append("-").append(state).append("]").toString(); + return String.format("FirewallRule %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "networkId", "purpose", "state")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupRuleVO.java b/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupRuleVO.java index 
1980cd33d14..325a6efc867 100644 --- a/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupRuleVO.java +++ b/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupRuleVO.java @@ -16,6 +16,8 @@ // under the License. package com.cloud.network.security; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; + import java.util.UUID; import javax.persistence.Column; @@ -89,6 +91,13 @@ public class SecurityGroupRuleVO implements SecurityRule { } } + @Override + public String toString() { + return String.format("SecurityGroupRule %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "type")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupVO.java b/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupVO.java index ec1cfae43b6..940baaad18d 100644 --- a/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupVO.java +++ b/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupVO.java @@ -16,6 +16,8 @@ // under the License. 
package com.cloud.network.security; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; + import java.util.UUID; import javax.persistence.Column; @@ -60,6 +62,13 @@ public class SecurityGroupVO implements SecurityGroup { uuid = UUID.randomUUID().toString(); } + @Override + public String toString() { + return String.format("SecurityGroup %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLItemVO.java b/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLItemVO.java index f28b3125a09..4333d35d473 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLItemVO.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLItemVO.java @@ -35,6 +35,7 @@ import javax.persistence.Transient; import com.cloud.utils.db.GenericDao; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "network_acl_item") @@ -168,7 +169,9 @@ public class NetworkACLItemVO implements NetworkACLItem, Cloneable { @Override public String toString() { - return new StringBuilder("Rule[").append(id).append("-").append("NetworkACL").append("-").append(state).append("]").toString(); + return String.format("NetworkACLItem %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "aclId", "state")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLVO.java b/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLVO.java index 280d5dfaf4b..37b9e7ff296 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLVO.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLVO.java @@ -89,7 +89,7 @@ public class 
NetworkACLVO implements NetworkACL { @Override public String toString() { - return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "uuid", "name", "vpcId"); + return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "name", "vpcId"); } public void setUuid(String uuid) { diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/VpcGatewayVO.java b/engine/schema/src/main/java/com/cloud/network/vpc/VpcGatewayVO.java index 72f6a89e70f..b1d4df35d4c 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/VpcGatewayVO.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/VpcGatewayVO.java @@ -29,6 +29,7 @@ import javax.persistence.Id; import javax.persistence.Table; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "vpc_gateways") @@ -163,9 +164,9 @@ public class VpcGatewayVO implements VpcGateway { @Override public String toString() { - StringBuilder buf = new StringBuilder("VpcGateway["); - buf.append(id).append("|").append(ip4Address.toString()).append("|").append(vpcId).append("]"); - return buf.toString(); + return String.format("VpcGateway %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "vpcId", "ip4Address")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/VpcOfferingVO.java b/engine/schema/src/main/java/com/cloud/network/vpc/VpcOfferingVO.java index 41254ba4a8b..274b9fedecc 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/VpcOfferingVO.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/VpcOfferingVO.java @@ -30,6 +30,7 @@ import javax.persistence.Table; import com.cloud.offering.NetworkOffering; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "vpc_offerings") @@ -180,8 +181,9 @@ public class 
VpcOfferingVO implements VpcOffering { @Override public String toString() { - StringBuilder buf = new StringBuilder("[VPC Offering ["); - return buf.append(id).append("-").append(name).append("]").toString(); + return String.format("VPCOffering %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); } public void setName(String name) { diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java b/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java index 27d8227284b..e8ccc2ebcf1 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java @@ -28,6 +28,7 @@ import javax.persistence.Table; import javax.persistence.Transient; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "vpc") @@ -210,8 +211,9 @@ public class VpcVO implements Vpc { @Override public String toString() { - final StringBuilder buf = new StringBuilder("[VPC ["); - return buf.append(id).append("-").append(name).append("]").toString(); + return String.format("VPC %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/offerings/NetworkOfferingVO.java b/engine/schema/src/main/java/com/cloud/offerings/NetworkOfferingVO.java index 0bf110757d7..5cad366945f 100644 --- a/engine/schema/src/main/java/com/cloud/offerings/NetworkOfferingVO.java +++ b/engine/schema/src/main/java/com/cloud/offerings/NetworkOfferingVO.java @@ -32,6 +32,7 @@ import com.cloud.network.Network; import com.cloud.network.Networks.TrafficType; import com.cloud.offering.NetworkOffering; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "network_offerings") @@ -471,8 +472,8 @@ public 
class NetworkOfferingVO implements NetworkOffering { @Override public String toString() { - StringBuilder buf = new StringBuilder("[Network Offering ["); - return buf.append(id).append("-").append(trafficType).append("-").append(name).append("]").toString(); + return String.format("NetworkOffering %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "trafficType")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/projects/ProjectInvitationVO.java b/engine/schema/src/main/java/com/cloud/projects/ProjectInvitationVO.java index 36e772edd3a..887939311b2 100644 --- a/engine/schema/src/main/java/com/cloud/projects/ProjectInvitationVO.java +++ b/engine/schema/src/main/java/com/cloud/projects/ProjectInvitationVO.java @@ -29,6 +29,7 @@ import javax.persistence.Id; import javax.persistence.Table; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "project_invitations") @@ -127,9 +128,9 @@ public class ProjectInvitationVO implements ProjectInvitation { @Override public String toString() { - StringBuilder buf = new StringBuilder("ProjectInvitation["); - buf.append(id).append("|projectId=").append(projectId).append("|accountId=").append(forAccountId).append("]"); - return buf.toString(); + return String.format("ProjectInvitation %s.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "projectId", "forAccountId")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/projects/ProjectVO.java b/engine/schema/src/main/java/com/cloud/projects/ProjectVO.java index c8faa00812c..4ac34eeab4c 100644 --- a/engine/schema/src/main/java/com/cloud/projects/ProjectVO.java +++ b/engine/schema/src/main/java/com/cloud/projects/ProjectVO.java @@ -117,7 +117,9 @@ public class ProjectVO implements Project, Identity, InternalIdentity { @Override public String toString() { - return 
String.format("Project %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "name", "uuid", "domainId")); + return String.format("Project %s.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "domainId")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/storage/BucketVO.java b/engine/schema/src/main/java/com/cloud/storage/BucketVO.java index 53017447c07..a54c1dd9b08 100644 --- a/engine/schema/src/main/java/com/cloud/storage/BucketVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/BucketVO.java @@ -19,8 +19,7 @@ package com.cloud.storage; import com.cloud.utils.db.GenericDao; import com.google.gson.annotations.Expose; import org.apache.cloudstack.storage.object.Bucket; -import org.apache.commons.lang3.builder.ToStringBuilder; -import org.apache.commons.lang3.builder.ToStringStyle; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Entity; @@ -257,7 +256,8 @@ public class BucketVO implements Bucket { @Override public String toString() { - return String.format("Bucket %s", new ToStringBuilder(this, ToStringStyle.JSON_STYLE).append("uuid", getUuid()).append("name", getName()) - .append("ObjectStoreId", getObjectStoreId()).toString()); + return String.format("Bucket %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "objectStoreId")); } } diff --git a/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java b/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java index b4f112f98e8..79f5bcb5157 100644 --- a/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java @@ -34,6 +34,7 @@ import javax.persistence.Transient; import com.cloud.offering.DiskOffering; import com.cloud.utils.db.GenericDao; +import 
org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "disk_offering") @@ -588,4 +589,11 @@ public class DiskOfferingVO implements DiskOffering { public void setDiskSizeStrictness(boolean diskSizeStrictness) { this.diskSizeStrictness = diskSizeStrictness; } + + @Override + public String toString() { + return String.format("DiskOffering %s.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } } diff --git a/engine/schema/src/main/java/com/cloud/storage/SnapshotPolicyVO.java b/engine/schema/src/main/java/com/cloud/storage/SnapshotPolicyVO.java index c7848586826..f57d9d3dccf 100644 --- a/engine/schema/src/main/java/com/cloud/storage/SnapshotPolicyVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/SnapshotPolicyVO.java @@ -27,6 +27,7 @@ import javax.persistence.Table; import com.cloud.storage.snapshot.SnapshotPolicy; import com.cloud.utils.DateUtil.IntervalType; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "snapshot_policy") @@ -76,6 +77,13 @@ public class SnapshotPolicyVO implements SnapshotPolicy { this.uuid = UUID.randomUUID().toString(); } + @Override + public String toString() { + return String.format("SnapshotPolicy %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "volumeId")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/storage/SnapshotScheduleVO.java b/engine/schema/src/main/java/com/cloud/storage/SnapshotScheduleVO.java index 86e0da53666..5e013e76d3c 100644 --- a/engine/schema/src/main/java/com/cloud/storage/SnapshotScheduleVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/SnapshotScheduleVO.java @@ -29,8 +29,7 @@ import javax.persistence.Temporal; import javax.persistence.TemporalType; import com.cloud.storage.snapshot.SnapshotSchedule; -import 
org.apache.commons.lang3.builder.ReflectionToStringBuilder; -import org.apache.commons.lang3.builder.ToStringStyle; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "snapshot_schedule") @@ -73,6 +72,13 @@ public class SnapshotScheduleVO implements SnapshotSchedule { this.asyncJobId = null; } + @Override + public String toString() { + return String.format("SnapshotSchedule %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "volumeId", "policyId")); + } + @Override public long getId() { return id; @@ -134,11 +140,4 @@ public class SnapshotScheduleVO implements SnapshotSchedule { public void setUuid(String uuid) { this.uuid = uuid; } - - @Override - public String toString() { - ReflectionToStringBuilder reflectionToStringBuilder = new ReflectionToStringBuilder(this, ToStringStyle.JSON_STYLE); - reflectionToStringBuilder.setExcludeFieldNames("id"); - return reflectionToStringBuilder.toString(); - } } diff --git a/engine/schema/src/main/java/com/cloud/storage/SnapshotVO.java b/engine/schema/src/main/java/com/cloud/storage/SnapshotVO.java index 39d2cdd0b77..19c67a91e2c 100644 --- a/engine/schema/src/main/java/com/cloud/storage/SnapshotVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/SnapshotVO.java @@ -30,12 +30,11 @@ import javax.persistence.Id; import javax.persistence.Table; import org.apache.cloudstack.util.HypervisorTypeConverter; -import org.apache.commons.lang3.builder.ToStringBuilder; -import org.apache.commons.lang3.builder.ToStringStyle; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.utils.db.GenericDao; import com.google.gson.annotations.Expose; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "snapshots") @@ -283,7 +282,8 @@ public class SnapshotVO implements Snapshot { @Override public String toString() { - return String.format("Snapshot %s", new 
ToStringBuilder(this, ToStringStyle.JSON_STYLE).append("uuid", getUuid()).append("name", getName()) - .append("volumeId", getVolumeId()).toString()); + return String.format("Snapshot %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "volumeId", "version")); } } diff --git a/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java b/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java index 9dc9734f8ab..10d08601515 100644 --- a/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java @@ -572,7 +572,9 @@ public class VMTemplateVO implements VirtualMachineTemplate { @Override public String toString() { - return String.format("Template %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uniqueName", "format")); + return String.format("Template %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "uniqueName", "format")); } public void setRemoved(Date removed) { diff --git a/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java b/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java index ea57ef91237..653be54a910 100644 --- a/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java @@ -514,7 +514,9 @@ public class VolumeVO implements Volume { @Override public String toString() { - return new StringBuilder("Vol[").append(id).append("|name=").append(name).append("|vm=").append(instanceId).append("|").append(volumeType).append("]").toString(); + return String.format("Volume %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "volumeType", "instanceId")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java b/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java index 1da7d52a366..d204f67dc93 100644 --- 
a/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java +++ b/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java @@ -36,6 +36,7 @@ import org.apache.cloudstack.api.InternalIdentity; import com.cloud.utils.db.Encrypt; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.StringUtils; @Entity @@ -130,6 +131,12 @@ public class UserAccountVO implements UserAccount, InternalIdentity { public UserAccountVO() { } + @Override + public String toString() { + return String.format("UserAccount %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields + (this, "id", "uuid", "username", "accountName")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/user/UserVO.java b/engine/schema/src/main/java/com/cloud/user/UserVO.java index 7dac26429ac..6e355e102e6 100644 --- a/engine/schema/src/main/java/com/cloud/user/UserVO.java +++ b/engine/schema/src/main/java/com/cloud/user/UserVO.java @@ -296,7 +296,7 @@ public class UserVO implements User, Identity, InternalIdentity { @Override public String toString() { - return String.format("User %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "username", "uuid")); + return String.format("User %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "username")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/user/dao/AccountDaoImpl.java b/engine/schema/src/main/java/com/cloud/user/dao/AccountDaoImpl.java index f9ef5c40eba..2654b22374f 100644 --- a/engine/schema/src/main/java/com/cloud/user/dao/AccountDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/user/dao/AccountDaoImpl.java @@ -310,7 +310,7 @@ public class AccountDaoImpl extends GenericDaoBase implements A if (!account.getNeedsCleanup()) { account.setNeedsCleanup(true); if (!update(accountId, account)) { - logger.warn("Failed to mark 
account id=" + accountId + " for cleanup"); + logger.warn("Failed to mark account {} for cleanup", account); } } } diff --git a/engine/schema/src/main/java/com/cloud/vm/InstanceGroupVO.java b/engine/schema/src/main/java/com/cloud/vm/InstanceGroupVO.java index 4437af29bc1..d5bd8c5aaae 100644 --- a/engine/schema/src/main/java/com/cloud/vm/InstanceGroupVO.java +++ b/engine/schema/src/main/java/com/cloud/vm/InstanceGroupVO.java @@ -32,6 +32,7 @@ import javax.persistence.Table; import com.cloud.user.Account; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "instance_group") @@ -74,6 +75,12 @@ public class InstanceGroupVO implements InstanceGroup { super(); } + @Override + public String toString() { + return String.format("InstanceGroup %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "name")); + } + + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/vm/NicVO.java b/engine/schema/src/main/java/com/cloud/vm/NicVO.java index 936efd112b7..6c569e22dd9 100644 --- a/engine/schema/src/main/java/com/cloud/vm/NicVO.java +++ b/engine/schema/src/main/java/com/cloud/vm/NicVO.java @@ -330,7 +330,10 @@ public class NicVO implements Nic { @Override public String toString() { - return String.format("Nic %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "instanceId", "deviceId", "broadcastUri", "reservationId", "iPv4Address")); + return String.format("Nic %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "instanceId", + "deviceId", "broadcastUri", "reservationId", "iPv4Address")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpVO.java b/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpVO.java index 093434052bc..4c8208b4be8 100644 --- 
a/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpVO.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpVO.java @@ -28,6 +28,7 @@ import javax.persistence.Table; import com.cloud.utils.db.GenericDao; import com.cloud.vm.NicSecondaryIp; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "nic_secondary_ips") @@ -87,6 +88,14 @@ public class NicSecondaryIpVO implements NicSecondaryIp { @Column(name = "vmId") long vmId; + @Override + public String toString() { + return String.format("NicSecondaryIp %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "vmId", + "nicId", "ip4Address", "ip6Address", "networkId")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java index 744518ba743..0e87e6bcb7d 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -109,7 +109,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem @Inject VolumeDao volumeDao; @Inject - HostDao hostDao; + protected HostDao hostDao; protected Attribute _updateTimeAttr; @@ -140,8 +140,6 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem private static final String COUNT_VMS_BY_ZONE_AND_STATE_AND_HOST_TAG = "SELECT COUNT(1) FROM vm_instance vi JOIN service_offering so ON vi.service_offering_id=so.id " + "JOIN vm_template vt ON vi.vm_template_id = vt.id WHERE vi.data_center_id = ? AND vi.state = ? AND vi.removed IS NULL AND (so.host_tag = ? 
OR vt.template_tag = ?)"; - @Inject - protected HostDao _hostDao; public VMInstanceDaoImpl() { } @@ -155,13 +153,13 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem IdStatesSearch.done(); VMClusterSearch = createSearchBuilder(); - SearchBuilder hostSearch = _hostDao.createSearchBuilder(); + SearchBuilder hostSearch = hostDao.createSearchBuilder(); VMClusterSearch.join("hostSearch", hostSearch, hostSearch.entity().getId(), VMClusterSearch.entity().getHostId(), JoinType.INNER); hostSearch.and("clusterId", hostSearch.entity().getClusterId(), SearchCriteria.Op.EQ); VMClusterSearch.done(); LHVMClusterSearch = createSearchBuilder(); - SearchBuilder hostSearch1 = _hostDao.createSearchBuilder(); + SearchBuilder hostSearch1 = hostDao.createSearchBuilder(); LHVMClusterSearch.join("hostSearch1", hostSearch1, hostSearch1.entity().getId(), LHVMClusterSearch.entity().getLastHostId(), JoinType.INNER); LHVMClusterSearch.and("hostid", LHVMClusterSearch.entity().getHostId(), Op.NULL); hostSearch1.and("clusterId", hostSearch1.entity().getClusterId(), SearchCriteria.Op.EQ); @@ -577,13 +575,13 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem logger.debug(str.toString()); } else { - logger.debug("Unable to update the vm id=" + vm.getId() + "; the vm either doesn't exist or already removed"); + logger.debug("Unable to update the vm {}; the vm either doesn't exist or already removed", vm); } } if (vo != null && vo.getState() == newState) { // allow for concurrent update if target state has already been matched - logger.debug("VM " + vo.getInstanceName() + " state has been already been updated to " + newState); + logger.debug("VM {} state has already been updated to {}", vo, newState); return true; } } @@ -954,8 +952,10 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem State instanceState = instance.getState(); if ((powerState == VirtualMachine.PowerState.PowerOff && instanceState == State.Running) || (powerState ==
VirtualMachine.PowerState.PowerOn && instanceState == State.Stopped)) { - logger.debug(String.format("VM id: %d on host id: %d and power host id: %d is in %s state, but power state is %s", - instance.getId(), instance.getHostId(), powerHostId, instanceState, powerState)); + HostVO instanceHost = hostDao.findById(instance.getHostId()); + HostVO powerHost = powerHostId == instance.getHostId() ? instanceHost : hostDao.findById(powerHostId); + logger.debug("VM: {} on host: {} and power host: {} is in {} state, but power state is {}", + instance, instanceHost, powerHost, instanceState, powerState); return false; } return true; diff --git a/engine/schema/src/main/java/com/cloud/vm/snapshot/VMSnapshotVO.java b/engine/schema/src/main/java/com/cloud/vm/snapshot/VMSnapshotVO.java index c48396ad021..5b6f97b82e7 100644 --- a/engine/schema/src/main/java/com/cloud/vm/snapshot/VMSnapshotVO.java +++ b/engine/schema/src/main/java/com/cloud/vm/snapshot/VMSnapshotVO.java @@ -36,6 +36,7 @@ import javax.persistence.Transient; import org.apache.cloudstack.engine.subsystem.api.storage.VMSnapshotOptions; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "vm_snapshots") @@ -145,6 +146,13 @@ public class VMSnapshotVO implements VMSnapshot { this.serviceOfferingId = serviceOfferingId; } + @Override + public String toString() { + return String.format("VMSnapshot %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "vmId")); + } + @Override public String getDescription() { return description; diff --git a/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java index ab8f5f2cd84..03a978f8546 100644 --- a/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java +++ 
b/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java @@ -176,7 +176,7 @@ public class VMSnapshotDaoImpl extends GenericDaoBase implem .append("; updatedTime=") .append(oldUpdatedTime); } else { - logger.debug("Unable to update VM snapshot: id=" + vo.getId() + ", as there is no such snapshot exists in the database anymore"); + logger.debug("Unable to update VM snapshot: {}, as no such snapshot exists in the database anymore", vo); } } return rows > 0; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/acl/RoleVO.java b/engine/schema/src/main/java/org/apache/cloudstack/acl/RoleVO.java index 084df29fa42..cff139a9263 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/acl/RoleVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/acl/RoleVO.java @@ -126,7 +126,8 @@ public class RoleVO implements Role { @Override public String toString() { - return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "name", "uuid", "roleType"); + return String.format("Role %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "roleType")); } public boolean isPublicRole() { diff --git a/engine/schema/src/main/java/org/apache/cloudstack/affinity/AffinityGroupVO.java b/engine/schema/src/main/java/org/apache/cloudstack/affinity/AffinityGroupVO.java index 536b96c6567..9b8fc598171 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/affinity/AffinityGroupVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/affinity/AffinityGroupVO.java @@ -28,6 +28,7 @@ import javax.persistence.Id; import javax.persistence.Table; import org.apache.cloudstack.acl.ControlledEntity; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "affinity_group") @@ -119,9 +120,8 @@ public class AffinityGroupVO implements AffinityGroup { @Override public String toString() { - StringBuilder buf = new
StringBuilder("AffinityGroup["); - buf.append(uuid).append("]"); - return buf.toString(); + return String.format("AffinityGroup %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); } @Override diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java index ba31dc59d39..fd3c0be18d2 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java @@ -29,6 +29,7 @@ import javax.persistence.Temporal; import javax.persistence.TemporalType; import com.cloud.utils.DateUtil; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "backup_schedule") @@ -68,6 +69,12 @@ public class BackupScheduleVO implements BackupSchedule { this.scheduledTimestamp = scheduledTimestamp; } + @Override + public String toString() { + return String.format("BackupSchedule %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "vmId", "schedule", "scheduleType")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java index 9b285e66cab..b4cd2f7bada 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.backup; import com.cloud.utils.db.GenericDao; import com.google.gson.Gson; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.StringUtils; import java.util.Arrays; @@ -94,6 +95,12 @@ public class BackupVO implements Backup { this.uuid = UUID.randomUUID().toString(); } + 
@Override + public String toString() { + return String.format("Backup %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "vmId", "backupType", "externalId")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanMigrationVO.java b/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanMigrationVO.java index eab2e555d69..6afc2e7707a 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanMigrationVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanMigrationVO.java @@ -20,6 +20,7 @@ package org.apache.cloudstack.cluster; import org.apache.cloudstack.jobs.JobInfo; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Entity; @@ -66,6 +67,13 @@ public class ClusterDrsPlanMigrationVO implements ClusterDrsPlanMigration { } + @Override + public String toString() { + return String.format("ClusterDrsPlanMigration %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "planId", "vmId", "jobId")); + } + public long getId() { return id; } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanVO.java b/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanVO.java index 0ce25ae90fe..68f7fe4b44e 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanVO.java @@ -20,6 +20,7 @@ package org.apache.cloudstack.cluster; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Entity; @@ -68,6 +69,13 @@ public class ClusterDrsPlanVO implements ClusterDrsPlan { uuid = 
UUID.randomUUID().toString(); } + @Override + public String toString() { + return String.format("ClusterDrsPlan %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "clusterId")); + } + public long getId() { return id; } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadCertificateVO.java b/engine/schema/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadCertificateVO.java index 36aefa201f3..3c35f59659f 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadCertificateVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadCertificateVO.java @@ -18,6 +18,7 @@ package org.apache.cloudstack.direct.download; import com.cloud.hypervisor.Hypervisor; import org.apache.cloudstack.util.HypervisorTypeConverter; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Convert; @@ -57,6 +58,13 @@ public class DirectDownloadCertificateVO implements DirectDownloadCertificate { this.uuid = UUID.randomUUID().toString(); } + @Override + public String toString() { + return String.format("DirectDownloadCertificate %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "alias")); + } + public void setId(Long id) { this.id = id; } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/db/VMEntityVO.java b/engine/schema/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/db/VMEntityVO.java index af48e5e04ac..917f8bb800a 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/db/VMEntityVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/db/VMEntityVO.java @@ -50,6 +50,7 @@ import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.State; import com.google.gson.Gson; import 
org.apache.cloudstack.util.HypervisorTypeConverter; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "vm_instance") @@ -460,7 +461,7 @@ public class VMEntityVO implements VirtualMachine, FiniteStateObject details) { this.details = details; } + + @Override + public String toString() { + return String.format("ObjectStore %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "providerName")); + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java index 707091adb87..92a444bd83f 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java @@ -26,6 +26,7 @@ import com.cloud.utils.UriUtils; import com.cloud.utils.db.Encrypt; import com.cloud.utils.db.GenericDao; import org.apache.cloudstack.util.HypervisorTypeConverter; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Convert; @@ -370,7 +371,7 @@ public class StoragePoolVO implements StoragePool { @Override public String toString() { - return new StringBuilder("Pool[").append(id).append("|").append(poolType).append("]").toString(); + return String.format("StoragePool %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "name", "poolType")); } @Override diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSVO.java index 3b869a5429f..8870bf6d4d8 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSVO.java +++ 
b/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSVO.java @@ -23,6 +23,7 @@ import java.util.Date; import java.util.UUID; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Entity; @@ -120,6 +121,13 @@ public class SharedFSVO implements SharedFS { this.uuid = UUID.randomUUID().toString(); } + @Override + public String toString() { + return String.format("SharedFS %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } + @Override public Class getEntityType() { return SharedFS.class; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleVO.java b/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleVO.java index 176f88c5f6b..e0065db1e77 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleVO.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.vm.schedule; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Entity; @@ -95,6 +96,11 @@ public class VMScheduleVO implements VMSchedule { this.enabled = enabled; } + @Override + public String toString() { + return String.format("VMSchedule %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "action", "description")); + } + @Override public String getUuid() { return uuid; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduledJobVO.java b/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduledJobVO.java index 0c2dd94cce5..775e9cfe40c 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduledJobVO.java 
+++ b/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduledJobVO.java @@ -18,6 +18,8 @@ */ package org.apache.cloudstack.vm.schedule; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; + import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.EnumType; @@ -71,6 +73,14 @@ public class VMScheduledJobVO implements VMScheduledJob { this.scheduledTime = scheduledTime; } + + @Override + public String toString() { + return String.format("VMScheduledJob %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "action", "vmScheduleId", "vmId", "asyncJobId")); + } + @Override public String getUuid() { return uuid; diff --git a/engine/schema/src/test/java/com/cloud/vm/dao/VMInstanceDaoImplTest.java b/engine/schema/src/test/java/com/cloud/vm/dao/VMInstanceDaoImplTest.java index 43679081550..5f8b2dd90ec 100644 --- a/engine/schema/src/test/java/com/cloud/vm/dao/VMInstanceDaoImplTest.java +++ b/engine/schema/src/test/java/com/cloud/vm/dao/VMInstanceDaoImplTest.java @@ -34,10 +34,13 @@ import java.util.ArrayList; import java.util.Calendar; import java.util.Date; +import com.cloud.host.dao.HostDao; import org.joda.time.DateTime; import org.junit.After; import org.junit.Before; import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.MockitoAnnotations; @@ -49,19 +52,24 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; +import org.mockito.junit.MockitoJUnitRunner; /** * Created by sudharma_jain on 3/2/17. 
*/ - +@RunWith(MockitoJUnitRunner.class) public class VMInstanceDaoImplTest { + @InjectMocks @Spy - VMInstanceDaoImpl vmInstanceDao = new VMInstanceDaoImpl(); + VMInstanceDaoImpl vmInstanceDao; @Mock VMInstanceVO vm; + @Mock + HostDao _hostDao; + private AutoCloseable closeable; @Before @@ -111,9 +119,6 @@ public class VMInstanceDaoImplTest { @Test public void testUpdatePowerStateVmNotFound() { - when(vm.getPowerStateUpdateTime()).thenReturn(null); - when(vm.getPowerHostId()).thenReturn(1L); - when(vm.getPowerState()).thenReturn(VirtualMachine.PowerState.PowerOn); doReturn(null).when(vmInstanceDao).findById(anyLong()); boolean result = vmInstanceDao.updatePowerState(1L, 1L, VirtualMachine.PowerState.PowerOff, new Date()); @@ -154,7 +159,6 @@ public class VMInstanceDaoImplTest { when(vm.getPowerStateUpdateCount()).thenReturn(MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT); when(vm.getState()).thenReturn(Running); doReturn(vm).when(vmInstanceDao).findById(anyLong()); - doReturn(true).when(vmInstanceDao).update(anyLong(), any()); boolean result = vmInstanceDao.updatePowerState(1L, 1L, VirtualMachine.PowerState.PowerOn, new Date()); @@ -170,8 +174,8 @@ public class VMInstanceDaoImplTest { public void testUpdatePowerStateNoChangeMaxUpdatesInvalidStateVmStopped() { when(vm.getPowerStateUpdateTime()).thenReturn(null); when(vm.getPowerHostId()).thenReturn(1L); + when(vm.getHostId()).thenReturn(1L); when(vm.getPowerState()).thenReturn(VirtualMachine.PowerState.PowerOn); - when(vm.getPowerStateUpdateCount()).thenReturn(MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT); when(vm.getState()).thenReturn(Stopped); doReturn(vm).when(vmInstanceDao).findById(anyLong()); doReturn(true).when(vmInstanceDao).update(anyLong(), any()); @@ -190,8 +194,8 @@ public class VMInstanceDaoImplTest { public void testUpdatePowerStateNoChangeMaxUpdatesInvalidStateVmRunning() { when(vm.getPowerStateUpdateTime()).thenReturn(null); when(vm.getPowerHostId()).thenReturn(1L); + when(vm.getHostId()).thenReturn(1L); 
when(vm.getPowerState()).thenReturn(VirtualMachine.PowerState.PowerOff); - when(vm.getPowerStateUpdateCount()).thenReturn(MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT); when(vm.getState()).thenReturn(Running); doReturn(vm).when(vmInstanceDao).findById(anyLong()); doReturn(true).when(vmInstanceDao).update(anyLong(), any()); diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java index bf8fa43fe6c..1212bc66fd7 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java @@ -210,7 +210,7 @@ public class KvmNonManagedStorageDataMotionStrategy extends StorageSystemDataMot TemplateInfo directDownloadTemplateInfo = templateDataFactory.getReadyBypassedTemplateOnPrimaryStore(srcVolumeInfo.getTemplateId(), destDataStore.getId(), destHost.getId()); if (directDownloadTemplateInfo != null) { - logger.debug(String.format("Template %s was of direct download type and successfully staged to primary store %s", directDownloadTemplateInfo.getId(), directDownloadTemplateInfo.getDataStore().getId())); + logger.debug("Template {} was of direct download type and successfully staged to primary store {}", directDownloadTemplateInfo.getImage(), directDownloadTemplateInfo.getDataStore()); return; } @@ -221,8 +221,8 @@ public class KvmNonManagedStorageDataMotionStrategy extends StorageSystemDataMot TemplateInfo sourceTemplateInfo = templateDataFactory.getTemplate(srcVolumeInfo.getTemplateId(), sourceTemplateDataStore); TemplateObjectTO sourceTemplate = new TemplateObjectTO(sourceTemplateInfo); - logger.debug(String.format("Could not find template [id=%s, name=%s] on the storage pool [id=%s]; copying the 
template to the target storage pool.", - srcVolumeInfo.getTemplateId(), sourceTemplateInfo.getName(), destDataStore.getId())); + logger.debug("Could not find template [id={}, uuid={}, name={}] on the storage pool [{}]; copying the template to the target storage pool.", + srcVolumeInfo.getTemplateId(), sourceTemplateInfo.getUuid(), sourceTemplateInfo.getName(), destDataStore); TemplateInfo destTemplateInfo = templateDataFactory.getTemplate(srcVolumeInfo.getTemplateId(), destDataStore); final TemplateObjectTO destTemplate = new TemplateObjectTO(destTemplateInfo); @@ -234,7 +234,8 @@ public class KvmNonManagedStorageDataMotionStrategy extends StorageSystemDataMot return; } } - logger.debug(String.format("Skipping 'copy template to target filesystem storage before migration' due to the template [%s] already exist on the storage pool [%s].", srcVolumeInfo.getTemplateId(), destStoragePool.getId())); + logger.debug("Skipping 'copy template to target filesystem storage before migration' due to the template [{}] already existing on the storage pool [{}].", + srcVolumeInfo.getTemplateId(), destStoragePool); } /** @@ -267,8 +268,7 @@ public class KvmNonManagedStorageDataMotionStrategy extends StorageSystemDataMot } private String generateFailToCopyTemplateMessage(TemplateObjectTO sourceTemplate, DataStore destDataStore) { - return String.format("Failed to copy template [id=%s, name=%s] to the primary storage pool [id=%s].", sourceTemplate.getId(), - sourceTemplate.getName(), destDataStore.getId()); + return String.format("Failed to copy template [%s] to the primary storage pool [%s].", sourceTemplate, destDataStore); } /** diff --git a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java index b7468195f5d..808c319b40f 100644 --- 
a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java +++ b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java @@ -303,7 +303,6 @@ public class KvmNonManagedStorageSystemDataMotionTest { Mockito.lenient().when(dataStoreVO.getId()).thenReturn(0l); ImageStoreEntity destDataStore = Mockito.mock(ImageStoreImpl.class); - Mockito.doReturn(0l).when(destDataStore).getId(); Answer copyCommandAnswer = Mockito.mock(Answer.class); diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java index 5109118fb54..c6430bcf9f9 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java @@ -123,9 +123,9 @@ public class TemplateDataFactoryImpl implements TemplateDataFactory { if (logger.isDebugEnabled()) { if (!found) { - logger.debug("template " + templateId + " is not in store:" + store.getId() + ", type:" + store.getRole()); + logger.debug("template {} with id {} is not in store: {}, type: {}", templ, templateId, store, store.getRole()); } else { - logger.debug("template " + templateId + " is already in store:" + store.getId() + ", type:" + store.getRole()); + logger.debug("template {} with id {} is already in store:{}, type: {}", templ, templateId, store, store.getRole()); } } @@ -242,7 +242,7 @@ public class TemplateDataFactoryImpl implements TemplateDataFactory { HostVO host = hostDao.findById(hostId); List pools = getStoragePoolsForScope(host.getDataCenterId(), host.getClusterId(), hostId, host.getHypervisorType()); if (CollectionUtils.isEmpty(pools)) { - throw new CloudRuntimeException(String.format("No storage pool found to 
download template: %s", templateVO.getName())); + throw new CloudRuntimeException(String.format("No storage pool found to download template: %s", templateVO)); } List existingRefs = templatePoolDao.listByTemplateId(templateVO.getId()); return getOneMatchingPoolIdFromRefs(existingRefs, pools); @@ -274,7 +274,7 @@ public class TemplateDataFactoryImpl implements TemplateDataFactory { } if (poolId == null) { - throw new CloudRuntimeException("No storage pool specified to download template: " + templateId); + throw new CloudRuntimeException(String.format("No storage pool specified to download template: %s", templateVO)); } StoragePoolVO poolVO = primaryDataStoreDao.findById(poolId); @@ -284,7 +284,7 @@ public class TemplateDataFactoryImpl implements TemplateDataFactory { VMTemplateStoragePoolVO spoolRef = templatePoolDao.findByPoolTemplate(poolId, templateId, null); if (spoolRef == null) { - throw new CloudRuntimeException("Template not created on managed storage pool: " + poolId + " to copy the download template: " + templateId); + throw new CloudRuntimeException(String.format("Template not created on managed storage pool: %s to copy the download template: %s", poolVO, templateVO)); } else if (spoolRef.getDownloadState() == VMTemplateStorageResourceAssoc.Status.NOT_DOWNLOADED) { directDownloadManager.downloadTemplate(templateId, poolId, hostId); } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java index abc955c2e49..38e0d0d081c 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java @@ -280,7 +280,7 @@ public class TemplateServiceImpl implements TemplateService { TemplateDataStoreVO tmpltHost = _vmTemplateStoreDao.findByStoreTemplate(store.getId(), 
template.getId()); if (tmpltHost == null) { associateTemplateToZone(template.getId(), dcId); - logger.info("Downloading builtin template " + template.getUniqueName() + " to data center: " + dcId); + logger.info("Downloading builtin template {} to data center: {}", template, dcId); TemplateInfo tmplt = _templateFactory.getTemplate(template.getId(), DataStoreRole.Image); createTemplateAsync(tmplt, store, null); } @@ -299,7 +299,7 @@ public class TemplateServiceImpl implements TemplateService { return false; } if (zoneId != null && _vmTemplateStoreDao.findByTemplateZone(template.getId(), zoneId, DataStoreRole.Image) == null) { - logger.debug(String.format("Template %s is not present on any image store for the zone ID: %d, its download cannot be skipped", template.getUniqueName(), zoneId)); + logger.debug("Template {} is not present on any image store for the zone ID: {}, its download cannot be skipped", template, zoneId); return false; } return true; @@ -376,29 +376,29 @@ public class TemplateServiceImpl implements TemplateService { TemplateProp tmpltInfo = templateInfos.remove(uniqueName); toBeDownloaded.remove(tmplt); if (tmpltStore != null) { - logger.info("Template Sync found " + uniqueName + " already in the image store"); + logger.info("Template Sync found {} already in the image store", tmplt); if (tmpltStore.getDownloadState() != Status.DOWNLOADED) { tmpltStore.setErrorString(""); } if (tmpltInfo.isCorrupted()) { tmpltStore.setDownloadState(Status.DOWNLOAD_ERROR); - String msg = "Template " + tmplt.getName() + ":" + tmplt.getId() + " is corrupted on secondary storage " + tmpltStore.getId(); + String msg = String.format("Template %s is corrupted on secondary storage %s", tmplt, store); tmpltStore.setErrorString(msg); logger.info(msg); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_UPLOAD_FAILED, zoneId, null, msg, msg); if (tmplt.getState() == VirtualMachineTemplate.State.NotUploaded || tmplt.getState() == VirtualMachineTemplate.State.UploadInProgress) { 
- logger.info("Template Sync found " + uniqueName + " on image store " + storeId + " uploaded using SSVM as corrupted, marking it as failed"); + logger.info("Template Sync found {} on image store {} uploaded using SSVM as corrupted, marking it as failed", tmplt, store); tmpltStore.setState(State.Failed); try { stateMachine.transitTo(tmplt, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao); } catch (NoTransitionException e) { - logger.error("Unexpected state transition exception for template " + tmplt.getName() + ". Details: " + e.getMessage()); + logger.error("Unexpected state transition exception for template {}. Details: {}", tmplt, e.getMessage()); } } else if (tmplt.getUrl() == null) { - msg = "Private template (" + tmplt + ") with install path " + tmpltInfo.getInstallPath() + " is corrupted, please check in image store: " + tmpltStore.getDataStoreId(); + msg = String.format("Private template (%s) with install path %s is corrupted, please check in image store: %s", tmplt, tmpltInfo.getInstallPath(), store); logger.warn(msg); } else { - logger.info("Removing template_store_ref entry for corrupted template " + tmplt.getName()); + logger.info("Removing template_store_ref entry for corrupted template {}", tmplt); _vmTemplateStoreDao.remove(tmpltStore.getId()); toBeDownloaded.add(tmplt); } @@ -438,7 +438,7 @@ public class TemplateServiceImpl implements TemplateService { try { stateMachine.transitTo(tmplt, event, null, _templateDao); } catch (NoTransitionException e) { - logger.error("Unexpected state transition exception for template " + tmplt.getName() + ". Details: " + e.getMessage()); + logger.error("Unexpected state transition exception for template {}. 
Details: {}", tmplt, e.getMessage()); } } @@ -483,30 +483,30 @@ public class TemplateServiceImpl implements TemplateService { tmpltInfo.getPhysicalSize(), tmpltInfo.getSize(), VirtualMachineTemplate.class.getName(), tmplt.getUuid()); } } else if (tmplt.getState() == VirtualMachineTemplate.State.NotUploaded || tmplt.getState() == VirtualMachineTemplate.State.UploadInProgress) { - logger.info("Template Sync did not find " + uniqueName + " on image store " + storeId + " uploaded using SSVM, marking it as failed"); + logger.info("Template Sync did not find {} on image store {} uploaded using SSVM, marking it as failed", tmplt, store); toBeDownloaded.remove(tmplt); tmpltStore.setDownloadState(Status.DOWNLOAD_ERROR); - String msg = "Template " + tmplt.getName() + ":" + tmplt.getId() + " is corrupted on secondary storage " + tmpltStore.getId(); + String msg = String.format("Template %s is corrupted on secondary storage %s", tmplt, store); tmpltStore.setErrorString(msg); tmpltStore.setState(State.Failed); _vmTemplateStoreDao.update(tmpltStore.getId(), tmpltStore); try { stateMachine.transitTo(tmplt, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao); } catch (NoTransitionException e) { - logger.error("Unexpected state transition exception for template " + tmplt.getName() + ". Details: " + e.getMessage()); + logger.error("Unexpected state transition exception for template {}. 
Details: {}", tmplt, e.getMessage()); } } else if (tmplt.isDirectDownload()) { - logger.info("Template " + tmplt.getName() + ":" + tmplt.getId() + " is marked for direct download, discarding it for download on image stores"); + logger.info("Template {} is marked for direct download, discarding it for download on image stores", tmplt); toBeDownloaded.remove(tmplt); } else { - logger.info("Template Sync did not find " + uniqueName + " on image store " + storeId + ", may request download based on available hypervisor types"); + logger.info("Template Sync did not find {} on image store {}, may request download based on available hypervisor types", tmplt, store); if (tmpltStore != null) { if (_storeMgr.isRegionStore(store) && tmpltStore.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOADED && tmpltStore.getState() == State.Ready && tmpltStore.getInstallPath() == null) { logger.info("Keep fake entry in template store table for migration of previous NFS to object store"); } else { - logger.info("Removing leftover template " + uniqueName + " entry from template store table"); + logger.info("Removing leftover template {} entry from template store table", tmplt); // remove those leftover entries _vmTemplateStoreDao.remove(tmpltStore.getId()); } @@ -530,12 +530,12 @@ public class TemplateServiceImpl implements TemplateService { // download. 
for (VMTemplateVO tmplt : toBeDownloaded) { if (tmplt.getUrl() == null) { // If url is null, skip downloading - logger.info("Skip downloading template " + tmplt.getUniqueName() + " since no url is specified."); + logger.info("Skip downloading template {} since no url is specified.", tmplt); continue; } // if this is private template, skip sync to a new image store if (isSkipTemplateStoreDownload(tmplt, zoneId)) { - logger.info("Skip sync downloading private template " + tmplt.getUniqueName() + " to a new image store"); + logger.info("Skip sync downloading private template {} to a new image store", tmplt); continue; } @@ -551,7 +551,7 @@ public class TemplateServiceImpl implements TemplateService { } if (availHypers.contains(tmplt.getHypervisorType())) { - logger.info("Downloading template " + tmplt.getUniqueName() + " to image store " + store.getName()); + logger.info("Downloading template {} to image store {}", tmplt, store); associateTemplateToZone(tmplt.getId(), zoneId); TemplateInfo tmpl = _templateFactory.getTemplate(tmplt.getId(), store); TemplateOpContext context = new TemplateOpContext<>(null,(TemplateObject)tmpl, null); @@ -560,8 +560,7 @@ public class TemplateServiceImpl implements TemplateService { caller.setContext(context); createTemplateAsync(tmpl, store, caller); } else { - logger.info("Skip downloading template " + tmplt.getUniqueName() + " since current data center does not have hypervisor " + - tmplt.getHypervisorType().toString()); + logger.info("Skip downloading template {} since current data center does not have hypervisor {}", tmplt, tmplt.getHypervisorType()); } } } @@ -585,10 +584,10 @@ public class TemplateServiceImpl implements TemplateService { answer = ep.sendMessage(dtCommand); } if (answer == null || !answer.getResult()) { - logger.info("Failed to deleted template at store: " + store.getName()); + logger.info("Failed to delete template at store: {}", store); } else { - String description = "Deleted template " + tInfo.getTemplateName() 
+ " on secondary storage " + storeId; + String description = String.format("Deleted template %s on secondary storage %s", tInfo.getTemplateName(), store); logger.info(description); } @@ -598,7 +597,7 @@ public class TemplateServiceImpl implements TemplateService { syncLock.unlock(); } } else { - logger.info("Couldn't get global lock on " + lockString + ", another thread may be doing template sync on data store " + storeId + " now."); + logger.info("Couldn't get global lock on {}, another thread may be doing template sync on data store {} now.", lockString, store); } } finally { syncLock.releaseRef(); @@ -673,15 +672,14 @@ public class TemplateServiceImpl implements TemplateService { if (tmpltStore != null) { physicalSize = tmpltStore.getPhysicalSize(); } else { - logger.warn("No entry found in template_store_ref for template id: " + template.getId() + " and image store id: " + ds.getId() + - " at the end of registering template!"); + logger.warn("No entry found in template_store_ref for template: {} and image store: {} at the end of registering template!", template, ds); } Scope dsScope = ds.getScope(); if (dsScope.getScopeId() != null) { UsageEventUtils.publishUsageEvent(etype, template.getAccountId(), dsScope.getScopeId(), template.getId(), template.getName(), null, null, physicalSize, template.getSize(), VirtualMachineTemplate.class.getName(), template.getUuid()); } else { - logger.warn("Zone scope image store " + ds.getId() + " has a null scope id"); + logger.warn("Zone scope image store {} has a null scope id", ds); } _resourceLimitMgr.incrementResourceCount(accountId, Resource.ResourceType.secondary_storage, template.getSize()); } @@ -707,7 +705,7 @@ public class TemplateServiceImpl implements TemplateService { return tanswer.getTemplateInfo(); } else { if (logger.isDebugEnabled()) { - logger.debug("can not list template for secondary storage host " + ssStore.getId()); + logger.debug("can not list template for secondary storage host {}", ssStore); } } @@ 
-844,8 +842,7 @@ public class TemplateServiceImpl implements TemplateService { _resourceLimitMgr.incrementResourceCount(template.getAccountId(), ResourceType.secondary_storage, templateVO.getSize()); } else { // Delete the Datadisk templates that were already created as they are now invalid - logger.debug("Since creation of Datadisk template: " + templateVO.getId() + " failed, delete other Datadisk templates that were created as part of parent" - + " template download"); + logger.debug("Since creation of Datadisk template: {} failed, delete other Datadisk templates that were created as part of parent template download", templateVO); TemplateInfo parentTemplateInfo = imageFactory.getTemplate(templateVO.getParentTemplateId(), imageStore); cleanupDatadiskTemplates(parentTemplateInfo); } @@ -859,8 +856,7 @@ public class TemplateServiceImpl implements TemplateService { TemplateApiResult result = null; result = templateFuture.get(); if (!result.isSuccess()) { - logger.debug("Since creation of parent template: " + templateInfo.getId() + " failed, delete Datadisk templates that were created as part of parent" - + " template download"); + logger.debug("Since creation of parent template: {} failed, delete Datadisk templates that were created as part of parent template download", templateInfo); cleanupDatadiskTemplates(templateInfo); } return result.isSuccess(); @@ -909,12 +905,12 @@ public class TemplateServiceImpl implements TemplateService { DataStore imageStore = parentTemplateInfo.getDataStore(); List datadiskTemplatesToDelete = _templateDao.listByParentTemplatetId(parentTemplateInfo.getId()); for (VMTemplateVO datadiskTemplateToDelete: datadiskTemplatesToDelete) { - logger.info("Delete template: " + datadiskTemplateToDelete.getId() + " from image store: " + imageStore.getName()); + logger.info("Delete template: {} from image store: {}", datadiskTemplateToDelete, imageStore); AsyncCallFuture future = 
deleteTemplateAsync(imageFactory.getTemplate(datadiskTemplateToDelete.getId(), imageStore)); try { TemplateApiResult result = future.get(); if (!result.isSuccess()) { - logger.warn("Failed to delete datadisk template: " + datadiskTemplateToDelete + " from image store: " + imageStore.getName() + " due to: " + result.getResult()); + logger.warn("Failed to delete datadisk template: {} from image store: {} due to: {}", datadiskTemplateToDelete, imageStore, result.getResult()); break; } _vmTemplateZoneDao.deletePrimaryRecordsForTemplate(datadiskTemplateToDelete.getId()); @@ -1027,33 +1023,32 @@ public class TemplateServiceImpl implements TemplateService { // This routine is used to push templates currently on cache store, but not in region store to region store. // used in migrating existing NFS secondary storage to S3. @Override - public void syncTemplateToRegionStore(long templateId, DataStore store) { + public void syncTemplateToRegionStore(VirtualMachineTemplate template, DataStore store) { if (_storeMgr.isRegionStore(store)) { if (logger.isDebugEnabled()) { - logger.debug("Sync template " + templateId + " from cache to object store..."); + logger.debug("Sync template {} from cache to object store...", template); } // if template is on region wide object store, check if it is really downloaded there (by checking install_path). Sync template to region // wide store if it is not there physically. 
- TemplateInfo tmplOnStore = _templateFactory.getTemplate(templateId, store); + TemplateInfo tmplOnStore = _templateFactory.getTemplate(template.getId(), store); if (tmplOnStore == null) { - throw new CloudRuntimeException("Cannot find an entry in template_store_ref for template " + templateId + " on region store: " + store.getName()); + throw new CloudRuntimeException(String.format("Cannot find an entry in template_store_ref for template %s on region store: %s", template, store)); } if (tmplOnStore.getInstallPath() == null || tmplOnStore.getInstallPath().length() == 0) { // template is not on region store yet, sync to region store - TemplateInfo srcTemplate = _templateFactory.getReadyTemplateOnCache(templateId); + TemplateInfo srcTemplate = _templateFactory.getReadyTemplateOnCache(template.getId()); if (srcTemplate == null) { - throw new CloudRuntimeException("Cannot find template " + templateId + " on cache store"); + throw new CloudRuntimeException(String.format("Cannot find template %s on cache store", tmplOnStore)); } AsyncCallFuture future = syncToRegionStoreAsync(srcTemplate, store); try { TemplateApiResult result = future.get(); if (result.isFailed()) { - throw new CloudRuntimeException("sync template from cache to region wide store failed for image store " + store.getName() + ":" + - result.getResult()); + throw new CloudRuntimeException(String.format("sync template from cache to region wide store failed for image store %s: %s", store, result.getResult())); } _cacheMgr.releaseCacheObject(srcTemplate); // reduce reference count for template on cache, so it can recycled by schedule } catch (Exception ex) { - throw new CloudRuntimeException("sync template from cache to region wide store failed for image store " + store.getName()); + throw new CloudRuntimeException(String.format("sync template from cache to region wide store failed for image store %s", store)); } } } @@ -1071,8 +1066,7 @@ public class TemplateServiceImpl implements TemplateService { // 
generate a URL from source template ssvm to download to destination data store String url = generateCopyUrl(srcTemplate); if (url == null) { - logger.warn("Unable to start/resume copy of template " + srcTemplate.getUniqueName() + " to " + destStore.getName() + - ", no secondary storage vm in running state in source zone"); + logger.warn("Unable to start/resume copy of template {} to {}, no secondary storage vm in running state in source zone", srcTemplate, destStore); throw new CloudRuntimeException("No secondary VM in running state in source template zone "); } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java index d59f6d4c54d..14db5ea5771 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java @@ -228,4 +228,9 @@ public class ImageStoreImpl implements ImageStoreEntity { return driver.createDataDiskTemplateAsync(dataDiskTemplate, path, diskId, bootable, fileSize, callback); } + @Override + public String toString() { + return imageDataStoreVO.toString(); + } + } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java index b7d83c70223..a3b7d0c9ecc 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java @@ -26,6 +26,7 @@ import javax.inject.Inject; import com.cloud.cpu.CPU; import com.cloud.storage.StorageManager; import com.cloud.user.UserData; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import 
org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -102,6 +103,7 @@ public class TemplateObject implements TemplateInfo { imageVO.setSize(size); } + @Override public VMTemplateVO getImage() { if (imageVO == null) { String msg = String.format("Template Object is not properly initialised %s", this.toString()); @@ -596,4 +598,11 @@ public class TemplateObject implements TemplateInfo { public boolean isFollowRedirects() { return followRedirects; } + + @Override + public String toString() { + return String.format("TemplateObject %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "imageVO", "dataStore")); + } } diff --git a/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java b/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java index f1c27526f52..a96d87ada04 100644 --- a/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java +++ b/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java @@ -30,6 +30,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.storage.datastore.db.ObjectStoreVO; import org.apache.cloudstack.storage.object.ObjectStoreDriver; import org.apache.cloudstack.storage.object.ObjectStoreEntity; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import java.util.Date; import java.util.List; @@ -57,6 +58,13 @@ public class ObjectStoreImpl implements ObjectStoreEntity { return instance; } + @Override + public String toString() { + return String.format("ObjectStoreImpl %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "objectStoreVO", "provider")); + } + @Override public DataStoreDriver getDriver() { return this.driver; diff --git 
a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java index afc8be1e5f9..f5cfaf07274 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java @@ -136,7 +136,7 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase { try { snapObj.processEvent(Snapshot.Event.OperationNotPerformed); } catch (NoTransitionException e) { - logger.debug("Failed to change state: " + snapshot.getId() + ": " + e.toString()); + logger.debug("Failed to change state of the snapshot {}, due to {}", snapshot, e); throw new CloudRuntimeException(e.toString()); } return snapshotDataFactory.getSnapshot(snapObj.getId(), store); @@ -231,7 +231,7 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase { if (r) { List cacheSnaps = snapshotDataFactory.listSnapshotOnCache(snapshot.getId()); for (SnapshotInfo cacheSnap : cacheSnaps) { - logger.debug(String.format("Deleting snapshot %s from image cache [%s].", snapshotTo, cacheSnap.getDataStore().getName())); + logger.debug("Deleting snapshot {} from image cache [{}].", snapshotTo, cacheSnap.getDataStore()); cacheSnap.delete(); } } @@ -297,7 +297,7 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase { if (!Snapshot.State.BackedUp.equals(snapshotVO.getState()) && !Snapshot.State.Destroying.equals(snapshotVO.getState())) { - throw new InvalidParameterValueException("Can't delete snapshotshot " + snapshotId + " due to it is in " + snapshotVO.getState() + " Status"); + throw new InvalidParameterValueException(String.format("Can't delete snapshot %s as it is in %s state", snapshotVO, snapshotVO.getState())); } return destroySnapshotEntriesAndFiles(snapshotVO, zoneId); @@ -442,7 
+442,7 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase { SnapshotVO snapshotVO = snapshotDao.acquireInLockTable(snapshot.getId()); if (snapshotVO == null) { - throw new CloudRuntimeException("Failed to get lock on snapshot:" + snapshot.getId()); + throw new CloudRuntimeException(String.format("Failed to get lock on snapshot: %s", snapshot)); } try { @@ -463,9 +463,9 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase { result = snapshotSvr.revertSnapshot(snapshot); if (!result) { - logger.debug("Failed to revert snapshot: " + snapshot.getId()); + logger.debug("Failed to revert snapshot: {}", snapshot); - throw new CloudRuntimeException("Failed to revert snapshot: " + snapshot.getId()); + throw new CloudRuntimeException(String.format("Failed to revert snapshot: %s", snapshot)); } } finally { if (result) { @@ -498,7 +498,7 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase { SnapshotVO snapshotVO = snapshotDao.acquireInLockTable(snapshot.getId()); if (snapshotVO == null) { - throw new CloudRuntimeException("Failed to get lock on snapshot:" + snapshot.getId()); + throw new CloudRuntimeException(String.format("Failed to get lock on snapshot: %s", snapshot)); } try { diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java index fc5e61ef710..4d8919ccc48 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java @@ -57,7 +57,7 @@ public class SnapshotDataFactoryImpl implements SnapshotDataFactory { public SnapshotInfo getSnapshot(DataObject obj, DataStore store) { SnapshotVO snapshot = snapshotDao.findById(obj.getId()); if (snapshot == null) { - throw new 
CloudRuntimeException("Can't find snapshot: " + obj.getId()); + throw new CloudRuntimeException("Can't find snapshot: " + obj); } SnapshotObject so = SnapshotObject.getSnapshotObject(snapshot, store); return so; diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java index 961a647d7a8..a3964bd461e 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java @@ -41,6 +41,7 @@ import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -184,8 +185,7 @@ public class SnapshotObject implements SnapshotInfo { processEvent(Event.OperationNotPerformed); } catch (NoTransitionException ex) { logger.error("no transition error: ", ex); - throw new CloudRuntimeException("Error marking snapshot backed up: " + - this.snapshot.getId() + " " + ex.getMessage()); + throw new CloudRuntimeException(String.format("Error marking snapshot backed up: %s %s", this.snapshot, ex.getMessage())); } } @@ -370,12 +370,11 @@ public class SnapshotObject implements SnapshotInfo { if (snapshotTO.getVolume() != null && snapshotTO.getVolume().getPath() != null) { VolumeVO vol = volumeDao.findByUuid(snapshotTO.getVolume().getUuid()); if (vol != null) { - logger.info("Update volume path change due to snapshot operation, volume " + vol.getId() + " path: " + vol.getPath() + "->" + - 
snapshotTO.getVolume().getPath()); + logger.info("Update volume path change due to snapshot operation, volume {} path: {}->{}", vol, vol.getPath(), snapshotTO.getVolume().getPath()); vol.setPath(snapshotTO.getVolume().getPath()); volumeDao.update(vol.getId(), vol); } else { - logger.error("Cound't find the original volume with uuid: " + snapshotTO.getVolume().getUuid()); + logger.error("Couldn't find the original volume: {}", snapshotTO.getVolume()); } } } else { @@ -466,4 +465,11 @@ public class SnapshotObject implements SnapshotInfo { public Class getEntityType() { return Snapshot.class; } + + @Override + public String toString() { + return String.format("SnapshotObject %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "snapshot", "store")); + } } diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java index dafc40e0674..2173aba3f05 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java @@ -198,7 +198,7 @@ public class SnapshotServiceImpl implements SnapshotService { AsyncCallFuture future = context.future; SnapshotResult snapResult = new SnapshotResult(snapshot, result.getAnswer()); if (result.isFailed()) { - logger.debug("create snapshot " + context.snapshot.getName() + " failed: " + result.getResult()); + logger.debug("create snapshot {} failed: {}", context.snapshot, result.getResult()); try { snapshot.processEvent(Snapshot.Event.OperationFailed); snapshot.processEvent(Event.OperationFailed); @@ -267,14 +267,14 @@ public class SnapshotServiceImpl implements SnapshotService { PrimaryDataStoreDriver primaryStore = (PrimaryDataStoreDriver)snapshotOnPrimary.getDataStore().getDriver(); 
primaryStore.takeSnapshot(snapshot, caller); } catch (Exception e) { - logger.debug("Failed to take snapshot: " + snapshot.getId(), e); + logger.debug("Failed to take snapshot: {}", snapshot, e); try { snapshot.processEvent(Snapshot.Event.OperationFailed); snapshot.processEvent(Event.OperationFailed); } catch (NoTransitionException e1) { logger.debug("Failed to change state for event: OperationFailed", e); } - throw new CloudRuntimeException("Failed to take snapshot" + snapshot.getId()); + throw new CloudRuntimeException(String.format("Failed to take snapshot %s", snapshot)); } SnapshotResult result; @@ -407,7 +407,7 @@ public class SnapshotServiceImpl implements SnapshotService { if (createSnapshotPayload.getAsyncBackup()) { _snapshotDao.remove(srcSnapshot.getId()); destSnapshot.processEvent(Event.OperationFailed); - throw new SnapshotBackupException("Failed in creating backup of snapshot with ID "+srcSnapshot.getId()); + throw new SnapshotBackupException(String.format("Failed in creating backup of snapshot %s", srcSnapshot)); } else { destSnapshot.processEvent(Event.OperationFailed); //if backup snapshot failed, mark srcSnapshot in snapshot_store_ref as failed also @@ -486,7 +486,7 @@ public class SnapshotServiceImpl implements SnapshotService { SnapshotResult res = null; try { if (result.isFailed()) { - logger.debug(String.format("Failed to delete snapshot [%s] due to: [%s].", snapshot.getUuid(), result.getResult())); + logger.debug("Failed to delete snapshot [{}] due to: [{}].", snapshot, result.getResult()); snapshot.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed); res = new SnapshotResult(context.snapshot, null); res.setResult(result.getResult()); @@ -495,8 +495,8 @@ public class SnapshotServiceImpl implements SnapshotService { res = new SnapshotResult(context.snapshot, null); } } catch (Exception e) { - logger.error(String.format("An exception occurred while processing an event in delete snapshot callback from snapshot [%s].", 
snapshot.getUuid())); - logger.debug(String.format("Exception while processing an event in delete snapshot callback from snapshot [%s].", snapshot.getUuid()), e); + logger.error("An exception occurred while processing an event in delete snapshot callback from snapshot [{}].", snapshot); + logger.debug("Exception while processing an event in delete snapshot callback from snapshot [{}].", snapshot, e); res.setResult(e.toString()); } future.complete(res); @@ -541,11 +541,11 @@ public class SnapshotServiceImpl implements SnapshotService { if (result.isFailed()) { throw new CloudRuntimeException(result.getResult()); } - logger.debug(String.format("Successfully deleted snapshot [%s] with ID [%s].", snapInfo.getName(), snapInfo.getUuid())); + logger.debug("Successfully deleted snapshot [{}].", snapInfo); return true; } catch (InterruptedException | ExecutionException e) { - logger.error(String.format("Failed to delete snapshot [%s] due to: [%s].", snapInfo.getUuid(), e.getMessage())); - logger.debug(String.format("Failed to delete snapshot [%s].", snapInfo.getUuid()), e); + logger.error("Failed to delete snapshot [{}] due to: [{}].", snapInfo, e.getMessage()); + logger.debug("Failed to delete snapshot [{}].", snapInfo, e); } return false; @@ -556,7 +556,7 @@ public class SnapshotServiceImpl implements SnapshotService { PrimaryDataStore store = null; SnapshotInfo snapshotOnPrimaryStore = _snapshotFactory.getSnapshotOnPrimaryStore(snapshot.getId()); if (snapshotOnPrimaryStore == null) { - logger.warn("Cannot find an entry for snapshot " + snapshot.getId() + " on primary storage pools, searching with volume's primary storage pool"); + logger.warn("Cannot find an entry for snapshot {} on primary storage pools, searching with volume's primary storage pool", snapshot); VolumeInfo volumeInfo = volFactory.getVolume(snapshot.getVolumeId(), DataStoreRole.Primary); store = (PrimaryDataStore)volumeInfo.getDataStore(); } else { @@ -595,7 +595,7 @@ public class SnapshotServiceImpl 
implements SnapshotService { List snapshots = _snapshotDao.listByStatus(volumeId, Snapshot.State.BackedUp); if (snapshots != null) { for (SnapshotVO snapshot : snapshots) { - syncSnapshotToRegionStore(snapshot.getId(), store); + syncSnapshotToRegionStore(snapshot, store); } } } @@ -603,53 +603,49 @@ public class SnapshotServiceImpl implements SnapshotService { @Override public void cleanupVolumeDuringSnapshotFailure(Long volumeId, Long snapshotId) { - SnapshotVO snaphsot = _snapshotDao.findById(snapshotId); + SnapshotVO snapshot = _snapshotDao.findById(snapshotId); - if (snaphsot != null) { - if (snaphsot.getState() != Snapshot.State.BackedUp) { + if (snapshot != null) { + if (snapshot.getState() != Snapshot.State.BackedUp) { List snapshotDataStoreVOs = _snapshotStoreDao.findBySnapshotId(snapshotId); for (SnapshotDataStoreVO snapshotDataStoreVO : snapshotDataStoreVOs) { - logger.debug("Remove snapshot " + snapshotId + ", status " + snapshotDataStoreVO.getState() + - " on snapshot_store_ref table with id: " + snapshotDataStoreVO.getId()); + logger.debug("Remove snapshot {}, status {} on snapshot_store_ref table with id: {}", snapshot, snapshotDataStoreVO.getState(), snapshotDataStoreVO.getId()); _snapshotStoreDao.remove(snapshotDataStoreVO.getId()); } - logger.debug("Remove snapshot " + snapshotId + " status " + snaphsot.getState() + " from snapshot table"); + logger.debug("Remove snapshot {} status {} from snapshot table", snapshot, snapshot.getState()); _snapshotDao.remove(snapshotId); } } - - } // push one individual snapshots currently on cache store to region store if it is not there already - private void syncSnapshotToRegionStore(long snapshotId, DataStore store){ + private void syncSnapshotToRegionStore(SnapshotVO snapshot, DataStore store){ // if snapshot is already on region wide object store, check if it is really downloaded there (by checking install_path). Sync snapshot to region // wide store if it is not there physically. 
- SnapshotInfo snapOnStore = _snapshotFactory.getSnapshot(snapshotId, store); + SnapshotInfo snapOnStore = _snapshotFactory.getSnapshot(snapshot.getId(), store); if (snapOnStore == null) { - throw new CloudRuntimeException("Cannot find an entry in snapshot_store_ref for snapshot " + snapshotId + " on region store: " + store.getName()); + throw new CloudRuntimeException(String.format("Cannot find an entry in snapshot_store_ref for snapshot %s on region store: %s", snapshot, store)); } if (snapOnStore.getPath() == null || snapOnStore.getPath().length() == 0) { if (logger.isDebugEnabled()) { - logger.debug("sync snapshot " + snapshotId + " from cache to object store..."); + logger.debug("sync snapshot {} from cache to object store...", snapshot); } // snapshot is not on region store yet, sync to region store - SnapshotInfo srcSnapshot = _snapshotFactory.getReadySnapshotOnCache(snapshotId); + SnapshotInfo srcSnapshot = _snapshotFactory.getReadySnapshotOnCache(snapshot.getId()); if (srcSnapshot == null) { - throw new CloudRuntimeException("Cannot find snapshot " + snapshotId + " on cache store"); + throw new CloudRuntimeException(String.format("Cannot find snapshot %s on cache store", snapshot)); } AsyncCallFuture future = syncToRegionStoreAsync(srcSnapshot, store); try { SnapshotResult result = future.get(); if (result.isFailed()) { - throw new CloudRuntimeException("sync snapshot from cache to region wide store failed for image store " + store.getName() + ":" - + result.getResult()); + throw new CloudRuntimeException(String.format("sync snapshot from cache to region wide store failed for image store %s: %s", store, result.getResult())); } _cacheMgr.releaseCacheObject(srcSnapshot); // reduce reference count for template on cache, so it can recycled by schedule } catch (Exception ex) { - throw new CloudRuntimeException("sync snapshot from cache to region wide store failed for image store " + store.getName()); + throw new CloudRuntimeException(String.format("sync 
snapshot from cache to region wide store failed for image store %s", store)); } } @@ -723,7 +719,7 @@ public class SnapshotServiceImpl implements SnapshotService { _snapshotDao.remove(srcSnapshot.getId()); } catch (NoTransitionException ex) { logger.debug("Failed to create backup " + ex.toString()); - throw new CloudRuntimeException("Failed to backup snapshot" + snapshot.getId()); + throw new CloudRuntimeException(String.format("Failed to backup snapshot %s", snapshot)); } } }); @@ -769,7 +765,7 @@ public class SnapshotServiceImpl implements SnapshotService { AsyncCallFuture future = new AsyncCallFuture<>(); EndPoint ep = epSelector.select(snapshot); if (ep == null) { - logger.error(String.format("Failed to find endpoint for generating copy URL for snapshot %d with store %d", snapshot.getId(), snapshot.getDataStore().getId())); + logger.error(String.format("Failed to find endpoint for generating copy URL for snapshot %s with store %s", snapshot.getSnapshotVO(), snapshot.getDataStore())); throw new ResourceUnavailableException("No secondary VM in running state in source snapshot zone", DataCenter.class, snapshot.getDataCenterId()); } DataStore store = snapshot.getDataStore(); diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java index d27beecfdda..1ec6e20fc9e 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java @@ -26,6 +26,7 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.storage.StoragePool; import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; import
org.apache.cloudstack.engine.subsystem.api.storage.VMSnapshotStrategy; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -162,8 +163,7 @@ public class ScaleIOVMSnapshotStrategy extends ManagerBase implements VMSnapshot Map srcVolumeDestSnapshotMap = new HashMap<>(); List volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId()); - final Long storagePoolId = vmSnapshotHelper.getStoragePoolForVM(userVm.getId()); - StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + StoragePoolVO storagePool = vmSnapshotHelper.getStoragePoolForVM(userVm); long prev_chain_size = 0; long virtual_size=0; for (VolumeObjectTO volume : volumeTOs) { @@ -188,7 +188,7 @@ public class ScaleIOVMSnapshotStrategy extends ManagerBase implements VMSnapshot vmSnapshotVO.setParent(current.getId()); try { - final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + final ScaleIOGatewayClient client = getScaleIOClient(storagePool); SnapshotGroup snapshotGroup = client.takeSnapshot(srcVolumeDestSnapshotMap); if (snapshotGroup == null) { throw new CloudRuntimeException("Failed to take VM snapshot on PowerFlex storage pool"); @@ -291,7 +291,8 @@ public class ScaleIOVMSnapshotStrategy extends ManagerBase implements VMSnapshot boolean result = false; try { List volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId()); - Long storagePoolId = vmSnapshotHelper.getStoragePoolForVM(userVm.getId()); + StoragePoolVO storagePool = vmSnapshotHelper.getStoragePoolForVM(userVm); + Long storagePoolId = storagePool.getId(); Map srcSnapshotDestVolumeMap = new HashMap<>(); for (VolumeObjectTO volume : volumeTOs) { VMSnapshotDetailsVO vmSnapshotDetail = vmSnapshotDetailsDao.findDetail(vmSnapshotVO.getId(), "Vol_" + volume.getId() + "_Snapshot"); @@ -305,7 +306,7 @@ public class ScaleIOVMSnapshotStrategy extends ManagerBase implements VMSnapshot throw new CloudRuntimeException("Failed to get the system id for PowerFlex storage pool for reverting VM snapshot: " + 
vmSnapshot.getName()); } - final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + final ScaleIOGatewayClient client = getScaleIOClient(storagePool); result = client.revertSnapshot(systemId, srcSnapshotDestVolumeMap); if (!result) { throw new CloudRuntimeException("Failed to revert VM snapshot on PowerFlex storage pool"); @@ -314,7 +315,7 @@ public class ScaleIOVMSnapshotStrategy extends ManagerBase implements VMSnapshot finalizeRevert(vmSnapshotVO, volumeTOs); result = true; } catch (Exception e) { - String errMsg = "Revert VM: " + userVm.getInstanceName() + " to snapshot: " + vmSnapshotVO.getName() + " failed due to " + e.getMessage(); + String errMsg = String.format("Revert VM: %s to snapshot: %s failed due to %s", userVm, vmSnapshotVO, e.getMessage()); logger.error(errMsg, e); throw new CloudRuntimeException(errMsg); } finally { @@ -378,8 +379,8 @@ public class ScaleIOVMSnapshotStrategy extends ManagerBase implements VMSnapshot try { List volumeTOs = vmSnapshotHelper.getVolumeTOList(vmSnapshot.getVmId()); - Long storagePoolId = vmSnapshotHelper.getStoragePoolForVM(userVm.getId()); - String systemId = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID).getValue(); + StoragePoolVO storagePool = vmSnapshotHelper.getStoragePoolForVM(userVm); + String systemId = storagePoolDetailsDao.findDetail(storagePool.getId(), ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID).getValue(); if (systemId == null) { throw new CloudRuntimeException("Failed to get the system id for PowerFlex storage pool for deleting VM snapshot: " + vmSnapshot.getName()); } @@ -390,7 +391,7 @@ public class ScaleIOVMSnapshotStrategy extends ManagerBase implements VMSnapshot } String snapshotGroupId = vmSnapshotDetailsVO.getValue(); - final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + final ScaleIOGatewayClient client = getScaleIOClient(storagePool); int volumesDeleted = client.deleteSnapshotGroup(systemId, snapshotGroupId); if 
(volumesDeleted <= 0) { throw new CloudRuntimeException("Failed to delete VM snapshot: " + vmSnapshot.getName()); @@ -509,7 +510,7 @@ public class ScaleIOVMSnapshotStrategy extends ManagerBase implements VMSnapshot } } - private ScaleIOGatewayClient getScaleIOClient(final Long storagePoolId) throws Exception { - return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePoolId, storagePoolDetailsDao); + private ScaleIOGatewayClient getScaleIOClient(final StoragePool storagePool) throws Exception { + return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePool, storagePoolDetailsDao); } } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/LocalHostEndpoint.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/LocalHostEndpoint.java index e3f4bcbdeca..758bbe0c8c4 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/LocalHostEndpoint.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/LocalHostEndpoint.java @@ -73,6 +73,11 @@ public class LocalHostEndpoint implements EndPoint { return 0; } + @Override + public String getUuid() { + return ""; + } + @Override public String getHostAddr() { return "127.0.0.0"; diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/RemoteHostEndPoint.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/RemoteHostEndPoint.java index fdde4ce3e62..bd4bce29b0a 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/RemoteHostEndPoint.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/RemoteHostEndPoint.java @@ -55,6 +55,7 @@ public class RemoteHostEndPoint implements EndPoint { protected Logger logger = LogManager.getLogger(getClass()); private long hostId; + private String hostUuid; private String hostAddress; private String publicAddress; @@ -74,6 +75,7 @@ public class RemoteHostEndPoint implements EndPoint { private void configure(Host host) { hostId = host.getId(); + hostUuid 
= host.getUuid(); hostAddress = host.getPrivateIpAddress(); publicAddress = host.getPublicIpAddress(); if (Host.Type.SecondaryStorageVM == host.getType()) { @@ -106,6 +108,11 @@ public class RemoteHostEndPoint implements EndPoint { return hostId; } + @Override + public String getUuid() { + return hostUuid; + } + // used when HypervisorGuruManager choose a different host to send command private void setId(long id) { HostVO host = _hostDao.findById(id); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java index a621e8a076d..79be6588899 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java @@ -299,7 +299,7 @@ public class DefaultEndPointSelector implements EndPointSelector { @Override public EndPoint select(DataObject srcData, DataObject destData, StorageAction action, boolean encryptionRequired) { - logger.error("IR24 select BACKUPSNAPSHOT from primary to secondary " + srcData.getId() + " dest=" + destData.getId()); + logger.error("IR24 select BACKUPSNAPSHOT from primary to secondary {} dest={}", srcData, destData); if (action == StorageAction.BACKUPSNAPSHOT && srcData.getDataStore().getRole() == DataStoreRole.Primary) { SnapshotInfo srcSnapshot = (SnapshotInfo)srcData; VolumeInfo volumeInfo = srcSnapshot.getBaseVolume(); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java index 01842441e26..f2a3d99f93c 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java @@ -25,6 
+25,7 @@ import java.util.Map; import javax.inject.Inject; +import com.cloud.uservm.UserVm; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -150,23 +151,25 @@ public class VMSnapshotHelperImpl implements VMSnapshotHelper { } @Override - public Long getStoragePoolForVM(Long vmId) { - List rootVolumes = volumeDao.findReadyRootVolumesByInstance(vmId); + public StoragePoolVO getStoragePoolForVM(UserVm vm) { + List rootVolumes = volumeDao.findReadyRootVolumesByInstance(vm.getId()); if (rootVolumes == null || rootVolumes.isEmpty()) { - throw new InvalidParameterValueException("Failed to find root volume for the user vm:" + vmId); + throw new InvalidParameterValueException(String.format("Failed to find root volume for the user vm: %s", vm)); } VolumeVO rootVolume = rootVolumes.get(0); StoragePoolVO rootVolumePool = primaryDataStoreDao.findById(rootVolume.getPoolId()); if (rootVolumePool == null) { - throw new InvalidParameterValueException("Failed to find root volume storage pool for the user vm:" + vmId); + throw new InvalidParameterValueException(String.format( + "Failed to find storage pool for root volume %s for the user vm: %s", rootVolume, vm)); } if (rootVolumePool.isInMaintenance()) { - throw new InvalidParameterValueException("Storage pool for the user vm:" + vmId + " is in maintenance"); + throw new InvalidParameterValueException(String.format( + "Storage pool %s for root volume %s of the user vm: %s is in maintenance", rootVolumePool, rootVolume, vm)); } - return rootVolumePool.getId(); + return rootVolumePool; } @Override diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java index db3f798a68a..a2e9eff2a08 100644 --- 
a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java @@ -177,19 +177,19 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver { if (data.getType() == DataObjectType.TEMPLATE) { caller.setCallback(caller.getTarget().createTemplateAsyncCallback(null, null)); if (logger.isDebugEnabled()) { - logger.debug("Downloading template to data store " + dataStore.getId()); + logger.debug("Downloading template to data store {}", dataStore); } _downloadMonitor.downloadTemplateToStorage(data, caller); } else if (data.getType() == DataObjectType.VOLUME) { caller.setCallback(caller.getTarget().createVolumeAsyncCallback(null, null)); if (logger.isDebugEnabled()) { - logger.debug("Downloading volume to data store " + dataStore.getId()); + logger.debug("Downloading volume to data store {}", dataStore); } _downloadMonitor.downloadVolumeToStorage(data, caller); } else if (data.getType() == DataObjectType.SNAPSHOT) { caller.setCallback(caller.getTarget().createSnapshotAsyncCallback(null, null)); if (logger.isDebugEnabled()) { - logger.debug("Downloading volume to data store " + dataStore.getId()); + logger.debug("Downloading snapshot to data store {}", dataStore); } _downloadMonitor.downloadSnapshotToStorage(data, caller); } @@ -212,7 +212,7 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver { OVFInformationTO ovfInformationTO = answer.getOvfInformationTO(); boolean persistDeployAsIs = deployAsIsHelper.persistTemplateOVFInformationAndUpdateGuestOS(template.getId(), ovfInformationTO, tmpltStoreVO); if (!persistDeployAsIs) { - logger.info("Failed persisting deploy-as-is template details for template " + template.getName()); + logger.info("Failed persisting deploy-as-is template details for template {}", template); return null; } } @@ -221,7 +221,7 @@ public abstract class BaseImageStoreDriverImpl 
implements ImageStoreDriver { } return null; } - logger.info("Updating store ref entry for template " + template.getName()); + logger.info("Updating store ref entry for template {}", template); TemplateDataStoreVO updateBuilder = _templateStoreDao.createForUpdate(); updateBuilder.setDownloadPercent(answer.getDownloadPct()); updateBuilder.setDownloadState(answer.getDownloadStatus()); @@ -378,7 +378,7 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver { result.setResult(answer.getDetails()); } } catch (Exception ex) { - logger.debug("Unable to destroy " + data.getType().toString() + ": " + data.getId(), ex); + logger.debug("Unable to destroy {}: [id: {}, uuid: {}, name: {}]", data.getType().toString(), data.getId(), data.getUuid(), data.getName(), ex); result.setResult(ex.toString()); } callback.complete(result); @@ -443,14 +443,11 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver { answer = agentMgr.send(endPoint.getId(), cmd); answer.setContextParam("cmd", cmdExecId.toString()); return answer; - } catch (AgentUnavailableException e) { + } catch (AgentUnavailableException | OperationTimedoutException e) { errMsg = e.toString(); - logger.debug("Failed to send command, due to Agent:" + endPoint.getId() + ", " + e.toString()); - } catch (OperationTimedoutException e) { - errMsg = e.toString(); - logger.debug("Failed to send command, due to Agent:" + endPoint.getId() + ", " + e.toString()); + logger.debug("Failed to send command, due to Agent [id: {}, uuid: {}]: {}", endPoint.getId(), endPoint.getUuid(), e.toString()); } - throw new CloudRuntimeException("Failed to send command, due to Agent:" + endPoint.getId() + ", " + errMsg); + throw new CloudRuntimeException(String.format("Failed to send command, due to Agent: [id: %s, uuid: %s], %s", endPoint.getId(), endPoint.getUuid(), errMsg)); } @Override @@ -507,7 +504,7 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver { Answer answer = null; 
String errMsg = null; if (logger.isDebugEnabled()) { - logger.debug("Create Datadisk template: " + dataDiskTemplate.getId()); + logger.debug("Create Datadisk template: {}", dataDiskTemplate); } CreateDatadiskTemplateCommand cmd = new CreateDatadiskTemplateCommand(dataDiskTemplate.getTO(), path, diskId, fileSize, bootable); EndPoint ep = _defaultEpSelector.select(dataDiskTemplate.getDataStore()); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java index 35153a10996..6d6cb7b70a9 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java @@ -20,6 +20,8 @@ package org.apache.cloudstack.storage.vmsnapshot; import java.util.List; +import com.cloud.uservm.UserVm; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import com.cloud.agent.api.VMSnapshotTO; @@ -37,7 +39,7 @@ public interface VMSnapshotHelper { VMSnapshotTO getSnapshotWithParents(VMSnapshotVO snapshot); - Long getStoragePoolForVM(Long vmId); + StoragePoolVO getStoragePoolForVM(UserVm vm); Storage.StoragePoolType getStoragePoolType(Long poolId); } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java index e4c26932619..7f28224a316 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java @@ -26,6 +26,7 @@ import java.util.Map; import javax.inject.Inject; +import com.cloud.dc.dao.ClusterDao; import 
org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.logging.log4j.Logger; @@ -74,6 +75,8 @@ public class PrimaryDataStoreHelper { @Inject protected StoragePoolHostDao storagePoolHostDao; @Inject + protected ClusterDao clusterDao; + @Inject private AnnotationDao annotationDao; public DataStore createPrimaryDataStore(PrimaryDataStoreParameters params) { @@ -266,7 +269,7 @@ public class PrimaryDataStoreHelper { this._capacityDao.removeBy(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, null, null, null, poolVO.getId()); txn.commit(); - logger.debug("Storage pool id=" + poolVO.getId() + " is removed successfully"); + logger.debug("Storage pool {} is removed successfully", poolVO); return true; } @@ -286,7 +289,7 @@ public class PrimaryDataStoreHelper { _capacityDao.update(capacity.getId(), capacity); } }); - logger.debug("Scope of storage pool id=" + pool.getId() + " is changed to zone"); + logger.debug("Scope of storage pool {} is changed to zone", pool); } public void switchToCluster(DataStore store, ClusterScope clusterScope) { @@ -312,6 +315,6 @@ public class PrimaryDataStoreHelper { _capacityDao.update(capacity.getId(), capacity); } }); - logger.debug("Scope of storage pool id=" + pool.getId() + " is changed to cluster id=" + clusterScope.getScopeId()); + logger.debug("Scope of storage pool {} is changed to cluster {}", pool::toString, () -> clusterDao.findById(clusterScope.getScopeId())); } } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java index 7f373fa9988..6a10c26cc0b 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java @@ -46,7 +46,6 @@ import 
org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.volume.VolumeObject; -import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -469,6 +468,6 @@ public class PrimaryDataStoreImpl implements PrimaryDataStore { @Override public String toString() { - return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "name", "uuid"); + return pdsv.toString(); } } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImpl.java index 1ee4d40a567..de3be809a05 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImpl.java @@ -22,6 +22,7 @@ import java.util.List; import javax.inject.Inject; +import com.cloud.dc.dao.DataCenterDao; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; @@ -55,6 +56,8 @@ public class BasePrimaryDataStoreLifeCycleImpl { @Inject protected HostDao hostDao; @Inject + protected DataCenterDao zoneDao; + @Inject protected StoragePoolHostDao storagePoolHostDao; private List getPoolHostsList(ClusterScope clusterScope, HypervisorType hypervisorType) { @@ -76,7 +79,7 @@ public class BasePrimaryDataStoreLifeCycleImpl { if (hosts != null) { for (HostVO host : hosts) { try { - 
storageMgr.connectHostToSharedPool(host.getId(), store.getId()); + storageMgr.connectHostToSharedPool(host, store.getId()); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + store, e); } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java index c6d9fab5f17..1afc1a68b44 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java @@ -27,6 +27,7 @@ import com.cloud.agent.api.SetupPersistentNetworkCommand; import com.cloud.agent.api.to.NicTO; import com.cloud.alert.AlertManager; import com.cloud.configuration.ConfigurationManager; +import com.cloud.dc.dao.DataCenterDao; import com.cloud.exception.StorageConflictException; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; @@ -85,6 +86,8 @@ public class DefaultHostListener implements HypervisorHostListener { @Inject StorageService storageService; @Inject + DataCenterDao zoneDao; + @Inject NetworkOfferingDao networkOfferingDao; @Inject HostDao hostDao; @@ -103,7 +106,7 @@ public class DefaultHostListener implements HypervisorHostListener { private boolean createPersistentNetworkResourcesOnHost(long hostId) { HostVO host = hostDao.findById(hostId); if (host == null) { - logger.warn(String.format("Host with id %ld can't be found", hostId)); + logger.warn("Host with id {} can't be found", hostId); return false; } setupPersistentNetwork(host); @@ -134,32 +137,32 @@ public class DefaultHostListener implements HypervisorHostListener { ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool, nfsMountOpts.first()); cmd.setWait(modifyStoragePoolCommandWait); - logger.debug(String.format("Sending 
modify storage pool command to agent: %d for storage pool: %d with timeout %d seconds", - hostId, poolId, cmd.getWait())); + HostVO host = hostDao.findById(hostId); + logger.debug("Sending modify storage pool command to agent: {} for storage pool: {} with timeout {} seconds", host, pool, cmd.getWait()); final Answer answer = agentMgr.easySend(hostId, cmd); if (answer == null) { - throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command" + pool.getId()); + throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command %s", pool)); } if (!answer.getResult()) { - String msg = "Unable to attach storage pool" + poolId + " to the host" + hostId; + String msg = String.format("Unable to attach storage pool %s to the host %d", pool, hostId); alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, pool.getDataCenterId(), pool.getPodId(), msg, msg); - throw new CloudRuntimeException("Unable to establish connection from storage head to storage pool " + pool.getId() + " due to " + answer.getDetails() + - pool.getId()); + throw new CloudRuntimeException(String.format("Unable to establish connection from storage head to storage pool %s due to %s %s", + pool, answer.getDetails(), pool.getUuid())); } - assert (answer instanceof ModifyStoragePoolAnswer) : "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? Pool=" + - pool.getId() + "Host=" + hostId; + assert (answer instanceof ModifyStoragePoolAnswer) : String.format( + "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? 
Pool=%s Host=%d", pool, hostId); ModifyStoragePoolAnswer mspAnswer = (ModifyStoragePoolAnswer) answer; if (mspAnswer.getLocalDatastoreName() != null && pool.isShared()) { String datastoreName = mspAnswer.getLocalDatastoreName(); List localStoragePools = this.primaryStoreDao.listLocalStoragePoolByPath(pool.getDataCenterId(), datastoreName); for (StoragePoolVO localStoragePool : localStoragePools) { if (datastoreName.equals(localStoragePool.getPath())) { - logger.warn("Storage pool: " + pool.getId() + " has already been added as local storage: " + localStoragePool.getName()); - throw new StorageConflictException("Cannot add shared storage pool: " + pool.getId() + " because it has already been added as local storage:" - + localStoragePool.getName()); + logger.warn("Storage pool: {} has already been added as local storage: {}", pool, localStoragePool); + throw new StorageConflictException(String.format( + "Cannot add shared storage pool: %s because it has already been added as local storage: %s", pool, localStoragePool)); } } } @@ -173,7 +176,7 @@ public class DefaultHostListener implements HypervisorHostListener { storageService.updateStorageCapabilities(poolId, false); - logger.info("Connection established between storage pool " + pool + " and host " + hostId); + logger.info("Connection established between storage pool {} and host {}", pool, host); return createPersistentNetworkResourcesOnHost(hostId); } @@ -222,12 +225,11 @@ public class DefaultHostListener implements HypervisorHostListener { new CleanupPersistentNetworkResourceCommand(createNicTOFromNetworkAndOffering(persistentNetworkVO, networkOfferingVO, host)); Answer answer = agentMgr.easySend(hostId, cleanupCmd); if (answer == null) { - logger.error("Unable to get answer to the cleanup persistent network command " + persistentNetworkVO.getId()); + logger.error("Unable to get answer to the cleanup persistent network command {}", persistentNetworkVO); continue; } if (!answer.getResult()) { - String msg = 
String.format("Unable to cleanup persistent network resources from network %d on the host %d", persistentNetworkVO.getId(), hostId); - logger.error(msg); + logger.error("Unable to cleanup persistent network resources from network {} on the host {}", persistentNetworkVO, hostId); } } return true; @@ -258,11 +260,11 @@ public class DefaultHostListener implements HypervisorHostListener { new SetupPersistentNetworkCommand(createNicTOFromNetworkAndOffering(networkVO, networkOfferingVO, host)); Answer answer = agentMgr.easySend(host.getId(), persistentNetworkCommand); if (answer == null) { - throw new CloudRuntimeException("Unable to get answer to the setup persistent network command " + networkVO.getId()); + throw new CloudRuntimeException(String.format("Unable to get answer to the setup persistent network command %s", networkVO)); } if (!answer.getResult()) { - String msg = String.format("Unable to create persistent network resources for network %d on the host %d in zone %d", networkVO.getId(), host.getId(), networkVO.getDataCenterId()); - logger.error(msg); + logger.error("Unable to create persistent network resources for network {} on the host {} in zone {}", + networkVO::toString, host::toString, () -> zoneDao.findById(networkVO.getDataCenterId())); } } } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java index 825a8cbd941..4a9f34c9f56 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java @@ -900,7 +900,7 @@ public class VolumeObject implements VolumeInfo { volumeVO.setPassphraseId(null); volumeDao.persist(volumeVO); - logger.debug(String.format("Checking to see if we can delete passphrase id %s", passphraseId)); + logger.debug("Checking to see if we can delete passphrase id {} 
for volume {}", passphraseId, volumeVO); List volumes = volumeDao.listVolumesByPassphraseId(passphraseId); if (volumes != null && !volumes.isEmpty()) { @@ -944,4 +944,11 @@ public class VolumeObject implements VolumeInfo { public boolean isFollowRedirects() { return followRedirects; } + + @Override + public String toString() { + return String.format("VolumeObject %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "volumeVO", "dataStore")); + } } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 3ca1d9201db..bf67be91108 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -32,6 +32,7 @@ import java.util.concurrent.ExecutionException; import javax.inject.Inject; +import com.cloud.vm.dao.VMInstanceDao; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.command.user.volume.CheckAndRepairVolumeCmd; @@ -163,6 +164,8 @@ public class VolumeServiceImpl implements VolumeService { @Inject VolumeDao volDao; @Inject + VMInstanceDao vmDao; + @Inject PrimaryDataStoreProviderManager dataStoreMgr; @Inject DataMotionService motionSrv; @@ -191,7 +194,7 @@ public class VolumeServiceImpl implements VolumeService { @Inject HostDao _hostDao; @Inject - private PrimaryDataStoreDao storagePoolDao; + PrimaryDataStoreDao storagePoolDao; @Inject private StoragePoolDetailsDao _storagePoolDetailsDao; @Inject @@ -378,7 +381,7 @@ public class VolumeServiceImpl implements VolumeService { if (volume.getDataStore() == null) { logger.info("Expunge volume with no data store specified"); if (canVolumeBeRemoved(volume.getId())) { - logger.info("Volume " + 
volume.getId() + " is not referred anywhere, remove it from volumes table"); + logger.info("Volume {} is not referred anywhere, remove it from volumes table", volume); volDao.remove(volume.getId()); } future.complete(result); @@ -389,7 +392,7 @@ public class VolumeServiceImpl implements VolumeService { VolumeDataStoreVO volumeStore = _volumeStoreDao.findByVolume(volume.getId()); if (volumeStore != null) { if (volumeStore.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOAD_IN_PROGRESS) { - String msg = "Volume: " + volume.getName() + " is currently being uploaded; can't delete it."; + String msg = String.format("Volume: %s is currently being uploaded; can't delete it.", volume); logger.debug(msg); result.setSuccess(false); result.setResult(msg); @@ -400,7 +403,7 @@ public class VolumeServiceImpl implements VolumeService { VolumeVO vol = volDao.findById(volume.getId()); if (vol == null) { - logger.debug("Volume " + volume.getId() + " is not found"); + logger.debug("Volume {} is not found", volume); future.complete(result); return future; } @@ -484,7 +487,7 @@ public class VolumeServiceImpl implements VolumeService { } if (canVolumeBeRemoved(vo.getId())) { - logger.info("Volume " + vo.getId() + " is not referred anywhere, remove it from volumes table"); + logger.info("Volume {} is not referred anywhere, remove it from volumes table", vo); volDao.remove(vo.getId()); } @@ -633,10 +636,10 @@ public class VolumeServiceImpl implements VolumeService { VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(dataStore.getId(), template.getId(), deployAsIsConfiguration); if (templatePoolRef == null) { - throw new CloudRuntimeException("Failed to find template " + template.getUniqueName() + " in storage pool " + dataStore.getId()); + throw new CloudRuntimeException(String.format("Failed to find template %s in storage pool %s", template.getImage(), dataStore)); } else { if (logger.isDebugEnabled()) { - logger.debug("Found template " + 
template.getUniqueName() + " in storage pool " + dataStore.getId() + " with VMTemplateStoragePool id: " + templatePoolRef.getId()); + logger.debug("Found template {} in storage pool {} with VMTemplateStoragePool: {}", template.getImage(), dataStore, templatePoolRef); } } long templatePoolRefId = templatePoolRef.getId(); @@ -656,8 +659,8 @@ public class VolumeServiceImpl implements VolumeService { } templatePoolRef = _tmpltPoolDao.findByPoolTemplate(dataStore.getId(), template.getId(), deployAsIsConfiguration); if (templatePoolRef != null && templatePoolRef.getState() == ObjectInDataStoreStateMachine.State.Ready) { - logger.info( - "Unable to acquire lock on VMTemplateStoragePool " + templatePoolRefId + ", But Template " + template.getUniqueName() + " is already copied to primary storage, skip copying"); + logger.info("Unable to acquire lock on VMTemplateStoragePool {}, But " + + "Template {} is already copied to primary storage, skip copying", templatePoolRefId, template); createVolumeFromBaseImageAsync(volume, templateOnPrimaryStoreObj, dataStore, future); return; } @@ -669,7 +672,7 @@ public class VolumeServiceImpl implements VolumeService { } try { if (templatePoolRef.getState() == ObjectInDataStoreStateMachine.State.Ready) { - logger.info("Template " + template.getUniqueName() + " is already copied to primary storage, skip copying"); + logger.info("Template {} is already copied to primary storage, skip copying", template.getImage()); createVolumeFromBaseImageAsync(volume, templateOnPrimaryStoreObj, dataStore, future); return; } @@ -891,7 +894,7 @@ public class VolumeServiceImpl implements VolumeService { try { destroyAndReallocateManagedVolume((VolumeInfo) vo); } catch (CloudRuntimeException ex) { - logger.warn("Couldn't destroy managed volume: " + vo.getId()); + logger.warn("Couldn't destroy managed volume: {}", vo); } } @@ -912,7 +915,7 @@ public class VolumeServiceImpl implements VolumeService { VMTemplateStoragePoolVO templatePoolRef = 
_tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId(), srcTemplateInfo.getDeployAsIsConfiguration()); if (templatePoolRef == null) { - throw new CloudRuntimeException("Failed to find template " + srcTemplateInfo.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId()); + throw new CloudRuntimeException("Failed to find template " + srcTemplateInfo.getUniqueName() + " in storage pool " + destPrimaryDataStore); } else if (templatePoolRef.getState() == ObjectInDataStoreStateMachine.State.Ready) { // Template already exists return templateOnPrimary; @@ -945,7 +948,7 @@ public class VolumeServiceImpl implements VolumeService { errMesg = callback.result.getResult(); } templateOnPrimary.processEvent(Event.OperationFailed); - throw new CloudRuntimeException("Unable to create template " + templateOnPrimary.getId() + " on primary storage " + destPrimaryDataStore.getId() + ":" + errMesg); + throw new CloudRuntimeException(String.format("Unable to create template %s on primary storage %s: %s", templateOnPrimary.getImage(), destPrimaryDataStore, errMesg)); } templateOnPrimary.processEvent(Event.OperationSuccessed); @@ -1036,7 +1039,7 @@ public class VolumeServiceImpl implements VolumeService { try { grantAccess(templateOnPrimary, destHost, destPrimaryDataStore); } catch (Exception e) { - throw new StorageAccessException("Unable to grant access to template: " + templateOnPrimary.getId() + " on host: " + destHost.getId()); + throw new StorageAccessException(String.format("Unable to grant access to template: %s on host: %s", templateOnPrimary.getImage(), destHost)); } templateOnPrimary.processEvent(Event.CopyingRequested); @@ -1057,12 +1060,12 @@ public class VolumeServiceImpl implements VolumeService { targets.add(details); - removeDynamicTargets(destHost.getId(), targets); + removeDynamicTargets(destHost, targets); } } if (result.isFailed()) { - throw new CloudRuntimeException("Failed to copy template " + templateOnPrimary.getId() + 
" to primary storage " + destPrimaryDataStore.getId() + ": " + result.getResult()); + throw new CloudRuntimeException(String.format("Failed to copy template %s to primary storage %s: %s", templateOnPrimary, destPrimaryDataStore, result.getResult())); // XXX: I find it is useful to destroy the volume on primary storage instead of another thread trying the copy again because I've seen // something weird happens to the volume (XenServer creates an SR, but the VDI copy can fail). // For now, I just retry the copy. @@ -1080,7 +1083,7 @@ public class VolumeServiceImpl implements VolumeService { } } - private void removeDynamicTargets(long hostId, List> targets) { + private void removeDynamicTargets(Host host, List> targets) { ModifyTargetsCommand cmd = new ModifyTargetsCommand(); cmd.setTargets(targets); @@ -1088,20 +1091,16 @@ public class VolumeServiceImpl implements VolumeService { cmd.setAdd(false); cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC); - sendModifyTargetsCommand(cmd, hostId); + sendModifyTargetsCommand(cmd, host); } - private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { - Answer answer = agentMgr.easySend(hostId, cmd); + private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, Host host) { + Answer answer = agentMgr.easySend(host.getId(), cmd); if (answer == null) { - String msg = "Unable to get an answer to the modify targets command"; - - logger.warn(msg); + logger.warn("Unable to get an answer to the modify targets command"); } else if (!answer.getResult()) { - String msg = "Unable to modify target on the following host: " + hostId; - - logger.warn(msg); + logger.warn("Unable to modify target on the following host: {}", host); } } @@ -1117,12 +1116,12 @@ public class VolumeServiceImpl implements VolumeService { VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId(), volumeInfo.getDeployAsIsConfiguration()); if 
(templatePoolRef == null) { - throw new CloudRuntimeException("Failed to find template " + templateOnPrimary.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId()); + throw new CloudRuntimeException(String.format("Failed to find template %s in storage pool %s", templateOnPrimary.getImage(), destPrimaryDataStore)); } //XXX: not sure if this the right thing to do here. We can always fallback to the "copy from sec storage" if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { - throw new CloudRuntimeException("Template " + templateOnPrimary.getUniqueName() + " has not been downloaded to primary storage."); + throw new CloudRuntimeException(String.format("Template %s has not been downloaded to primary storage.", templateOnPrimary.getImage())); } try { @@ -1149,7 +1148,7 @@ public class VolumeServiceImpl implements VolumeService { VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), srcTemplateOnPrimary.getId(), null); if (templatePoolRef == null) { - throw new CloudRuntimeException("Failed to find template " + srcTemplateOnPrimary.getUniqueName() + " in storage pool " + srcTemplateOnPrimary.getId()); + throw new CloudRuntimeException(String.format("Failed to find template %s in storage pool %s", srcTemplateOnPrimary.getImage(), destPrimaryDataStore)); } if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { @@ -1162,7 +1161,7 @@ public class VolumeServiceImpl implements VolumeService { try { grantAccess(srcTemplateOnPrimary, destHost, destPrimaryDataStore); } catch (Exception e) { - throw new StorageAccessException("Unable to grant access to src template: " + srcTemplateOnPrimary.getId() + " on host: " + destHost.getId()); + throw new StorageAccessException(String.format("Unable to grant access to src template: %s on host: %s", srcTemplateOnPrimary, destHost)); } _volumeDetailsDao.addDetail(volumeInfo.getId(), volumeDetailKey, String.valueOf(templatePoolRef.getId()), false); @@ 
-1211,7 +1210,7 @@ public class VolumeServiceImpl implements VolumeService { try { destroyAndReallocateManagedVolume(volumeInfo); } catch (CloudRuntimeException ex) { - logger.warn("Failed to destroy managed volume: " + volumeInfo.getId()); + logger.warn("Failed to destroy managed volume: {}", volumeInfo); errMsg += " : " + ex.getMessage(); } @@ -1248,21 +1247,21 @@ public class VolumeServiceImpl implements VolumeService { VolumeVO newVolume = (VolumeVO) newVol; newVolume.set_iScsiName(null); volDao.update(newVolume.getId(), newVolume); - logger.debug("Allocated new volume: " + newVolume.getId() + " for the VM: " + volume.getInstanceId()); + logger.debug("Allocated new volume: {} for the VM: {}", newVolume::toString, () -> (volume.getInstanceId() != null ? vmDao.findById(volume.getInstanceId()) : null)); try { AsyncCallFuture expungeVolumeFuture = expungeVolumeAsync(volumeInfo); VolumeApiResult expungeVolumeResult = expungeVolumeFuture.get(); if (expungeVolumeResult.isFailed()) { - logger.warn("Failed to expunge volume: " + volumeInfo.getId() + " that was created"); - throw new CloudRuntimeException("Failed to expunge volume: " + volumeInfo.getId() + " that was created"); + logger.warn("Failed to expunge volume: {} that was created", volumeInfo); + throw new CloudRuntimeException(String.format("Failed to expunge volume: %s that was created", volumeInfo.getVolume())); } } catch (Exception ex) { if (canVolumeBeRemoved(volumeInfo.getId())) { volDao.remove(volumeInfo.getId()); } - logger.warn("Unable to expunge volume: " + volumeInfo.getId() + " due to: " + ex.getMessage()); - throw new CloudRuntimeException("Unable to expunge volume: " + volumeInfo.getId() + " due to: " + ex.getMessage()); + logger.warn("Unable to expunge volume: {} due to: {}", volumeInfo, ex.getMessage()); + throw new CloudRuntimeException(String.format("Unable to expunge volume: %s due to: %s", volumeInfo.getVolume(), ex.getMessage())); } } @@ -1382,12 +1381,14 @@ public class VolumeServiceImpl 
implements VolumeService { templateOnPrimary = createManagedTemplateVolume(srcTemplateInfo, destPrimaryDataStore); if (templateOnPrimary == null) { - throw new CloudRuntimeException("Failed to create template " + srcTemplateInfo.getUniqueName() + " on primary storage: " + destDataStoreId); + throw new CloudRuntimeException(String.format("Failed to create template %s on primary storage: %s", + srcTemplateInfo.getImage(), destPrimaryDataStore)); } templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId(), null); if (templatePoolRef == null) { - throw new CloudRuntimeException("Failed to find template " + srcTemplateInfo.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId()); + throw new CloudRuntimeException(String.format("Failed to find template %s in storage pool %s", + srcTemplateInfo.getImage(), destPrimaryDataStore)); } if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { @@ -1407,7 +1408,7 @@ public class VolumeServiceImpl implements VolumeService { try { grantAccess(templateOnPrimary, destHost, destPrimaryDataStore); } catch (Exception e) { - throw new StorageAccessException("Unable to grant access to template: " + templateOnPrimary.getId() + " on host: " + destHost.getId()); + throw new StorageAccessException(String.format("Unable to grant access to template: %s on host: %s", templateOnPrimary, destHost)); } templateOnPrimary.processEvent(Event.CopyingRequested); @@ -1416,8 +1417,8 @@ public class VolumeServiceImpl implements VolumeService { //Download and copy template to the managed volume TemplateInfo templateOnPrimaryNow = tmplFactory.getReadyBypassedTemplateOnManagedStorage(srcTemplateId, templateOnPrimary, destDataStoreId, destHostId); if (templateOnPrimaryNow == null) { - logger.debug("Failed to prepare ready bypassed template: " + srcTemplateId + " on primary storage: " + templateOnPrimary.getId()); - throw new CloudRuntimeException("Failed to prepare ready bypassed 
template: " + srcTemplateId + " on primary storage: " + templateOnPrimary.getId()); + logger.debug("Failed to prepare ready bypassed template: {} on primary storage: {}", srcTemplateInfo, templateOnPrimary); + throw new CloudRuntimeException(String.format("Failed to prepare ready bypassed template: %s on primary storage: %s", srcTemplateInfo, templateOnPrimary)); } templateOnPrimary.processEvent(Event.OperationSuccessed); return templateOnPrimaryNow; @@ -1459,7 +1460,7 @@ public class VolumeServiceImpl implements VolumeService { AsyncCallFuture future = new AsyncCallFuture<>(); if (storageCanCloneVolume && computeSupportsVolumeClone) { - logger.debug("Storage " + destDataStoreId + " can support cloning using a cached template and compute side is OK with volume cloning."); + logger.debug("Storage {} can support cloning using a cached template and compute side is OK with volume cloning.", destPrimaryDataStore); GlobalLock lock = null; TemplateInfo templateOnPrimary = null; @@ -1483,7 +1484,7 @@ public class VolumeServiceImpl implements VolumeService { templateOnPrimary = createManagedTemplateVolume(srcTemplateInfo, destPrimaryDataStore); if (templateOnPrimary == null) { - throw new CloudRuntimeException("Failed to create template " + srcTemplateInfo.getUniqueName() + " on primary storage: " + destDataStoreId); + throw new CloudRuntimeException(String.format("Failed to create template %s on primary storage: %s", srcTemplateInfo, destPrimaryDataStore)); } } @@ -1491,7 +1492,7 @@ public class VolumeServiceImpl implements VolumeService { VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId(), null); if (templatePoolRef == null) { - throw new CloudRuntimeException("Failed to find template " + srcTemplateInfo.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId()); + throw new CloudRuntimeException(String.format("Failed to find template %s in storage pool %s", srcTemplateInfo, 
destPrimaryDataStore)); } if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { @@ -1516,7 +1517,7 @@ public class VolumeServiceImpl implements VolumeService { if (destPrimaryDataStore.getPoolType() != StoragePoolType.PowerFlex) { // We have a template on primary storage. Clone it to new volume. - logger.debug("Creating a clone from template on primary storage " + destDataStoreId); + logger.debug("Creating a clone from template on primary storage {}", destPrimaryDataStore); createManagedVolumeCloneTemplateAsync(volumeInfo, templateOnPrimary, destPrimaryDataStore, future); } else { @@ -1848,13 +1849,8 @@ public class VolumeServiceImpl implements VolumeService { if (logger.isDebugEnabled()) { String srcRole = (srcStore != null && srcStore.getRole() != null ? srcVolume.getDataStore().getRole().toString() : ""); - String msg = String.format("copying %s(id=%d, role=%s) to %s (id=%d, role=%s)" - , srcVolume.getName() - , srcVolume.getId() - , srcRole - , destStore.getName() - , destStore.getId() - , destStore.getRole()); + String msg = String.format("copying %s (role=%s) to %s (role=%s)", + srcVolume, srcRole, destStore, destStore.getRole()); logger.debug(msg); } @@ -1917,7 +1913,7 @@ public class VolumeServiceImpl implements VolumeService { srcVolume.processEvent(Event.OperationFailed); destroyVolume(destVolume.getId()); if (destVolume.getStoragePoolType() == StoragePoolType.PowerFlex) { - logger.info("Dest volume " + destVolume.getId() + " can be removed"); + logger.info("Dest volume {} can be removed", destVolume); destVolume.processEvent(Event.ExpungeRequested); destVolume.processEvent(Event.OperationSuccessed); volDao.remove(destVolume.getId()); @@ -1961,12 +1957,12 @@ public class VolumeServiceImpl implements VolumeService { volDao.updateUuid(sourceVolumeId, destinationVolume.getId()); volDao.detachVolume(sourceVolumeId); - logger.info(String.format("Cleaning up %s on storage [%s].", sourceVolumeVo.getVolumeDescription(), sourceVolumeVo.getPoolId())); 
+ logger.info("Cleaning up {} on storage [{}].", sourceVolumeVo, sourceVolume.getDataStore()); destroyVolume(sourceVolumeId); try { if (sourceVolume.getStoragePoolType() == StoragePoolType.PowerFlex) { - logger.info(String.format("Source volume %s can be removed.", sourceVolumeVo.getVolumeDescription())); + logger.info("Source volume {} can be removed.", sourceVolumeVo); sourceVolume.processEvent(Event.ExpungeRequested); sourceVolume.processEvent(Event.OperationSuccessed); volDao.remove(sourceVolume.getId()); @@ -1975,7 +1971,7 @@ public class VolumeServiceImpl implements VolumeService { expungeSourceVolumeAfterMigration(sourceVolumeVo, retryExpungeVolumeAsync); return true; } catch (InterruptedException | ExecutionException e) { - logger.error(String.format("Failed to clean up %s on storage [%s].", sourceVolumeVo.getVolumeDescription(), sourceVolumeVo.getPoolId()), e); + logger.error("Failed to clean up {} on storage [{}].", sourceVolumeVo, sourceVolume.getDataStore(), e); return false; } } @@ -1987,13 +1983,14 @@ public class VolumeServiceImpl implements VolumeService { AsyncCallFuture destroyFuture = expungeVolumeAsync(sourceVolume); VolumeApiResult volumeApiResult = destroyFuture.get(); + StoragePoolVO pool = storagePoolDao.findById(sourceVolumeVo.getPoolId()); if (volumeApiResult.isSuccess()) { - logger.debug(String.format("%s on storage [%s] was cleaned up successfully.", sourceVolumeVo.getVolumeDescription(), sourceVolumeVo.getPoolId())); + logger.debug("{} on storage [{}] was cleaned up successfully.", sourceVolumeVo, pool); return; } - String message = String.format("Failed to clean up %s on storage [%s] due to [%s].", sourceVolumeVo.getVolumeDescription(), sourceVolumeVo.getPoolId(), - volumeApiResult.getResult()); + String message = String.format("Failed to clean up %s on storage [%s] due to [%s].", + sourceVolumeVo, pool, volumeApiResult.getResult()); if (!retryExpungeVolumeAsync) { logger.warn(message); @@ -2059,7 +2056,7 @@ public class 
VolumeServiceImpl implements VolumeService { AsyncCallFuture createVolumeFuture = createVolumeAsync(destVolume, destStore); VolumeApiResult createVolumeResult = createVolumeFuture.get(); if (createVolumeResult.isFailed()) { - logger.debug("Failed to create dest volume " + destVolume.getId() + ", volume can be removed"); + logger.debug("Failed to create dest volume {}, volume can be removed", destVolume); destroyVolume(destVolume.getId()); destVolume.processEvent(Event.ExpungeRequested); destVolume.processEvent(Event.OperationSuccessed); @@ -2204,14 +2201,12 @@ public class VolumeServiceImpl implements VolumeService { } if (StringUtils.isAnyEmpty(srcPoolSystemId, destPoolSystemId)) { - logger.warn("PowerFlex src pool: " + srcDataStore.getId() + " or dest pool: " + destDataStore.getId() + - " storage instance details are not available"); + logger.warn("PowerFlex src pool: {} or dest pool: {} storage instance details are not available", srcDataStore, destDataStore); return false; } if (!srcPoolSystemId.equals(destPoolSystemId)) { - logger.debug("PowerFlex src pool: " + srcDataStore.getId() + " and dest pool: " + destDataStore.getId() + - " belongs to different storage instances, create new managed volume"); + logger.debug("PowerFlex src pool: {} and dest pool: {} belongs to different storage instances, create new managed volume", srcDataStore, destDataStore); return true; } } @@ -2407,7 +2402,7 @@ public class VolumeServiceImpl implements VolumeService { EndPoint ep = _epSelector.select(store); if (ep == null) { - String errorMessage = "There is no secondary storage VM for image store " + store.getName(); + String errorMessage = String.format("There is no secondary storage VM for image store %s", store); logger.warn(errorMessage); throw new CloudRuntimeException(errorMessage); } @@ -2439,7 +2434,7 @@ public class VolumeServiceImpl implements VolumeService { if (volStore != null) { physicalSize = volStore.getPhysicalSize(); } else { - logger.warn("No entry found in 
volume_store_ref for volume id: " + vo.getId() + " and image store id: " + ds.getId() + " at the end of uploading volume!"); + logger.warn("No entry found in volume_store_ref for volume: {} and image store: {} at the end of uploading volume!", vo, ds); } Scope dsScope = ds.getScope(); if (dsScope.getScopeType() == ScopeType.ZONE) { @@ -2447,7 +2442,7 @@ public class VolumeServiceImpl implements VolumeService { UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_UPLOAD, vo.getAccountId(), dsScope.getScopeId(), vo.getId(), vo.getName(), null, null, physicalSize, vo.getSize(), Volume.class.getName(), vo.getUuid()); } else { - logger.warn("Zone scope image store " + ds.getId() + " has a null scope id"); + logger.warn("Zone scope image store {} has a null scope id", ds); } } else if (dsScope.getScopeType() == ScopeType.REGION) { // publish usage event for region-wide image store using a -1 zoneId for 4.2, need to revisit post-4.2 @@ -2590,8 +2585,11 @@ public class VolumeServiceImpl implements VolumeService { for (VolumeDataStoreVO volumeStore : dbVolumes) { VolumeVO volume = volDao.findById(volumeStore.getVolumeId()); if (volume == null) { - logger.warn("Volume_store_ref table shows that volume " + volumeStore.getVolumeId() + " is on image store " + storeId - + ", but the volume is not found in volumes table, potentially some bugs in deleteVolume, so we just treat this volume to be deleted and mark it as destroyed"); + logger.warn("Volume_store_ref table shows that volume {} is " + + "on image store {}, but the volume is not found in volumes " + + "table, potentially some bugs in deleteVolume, so we just " + + "treat this volume to be deleted and mark it as destroyed", + volumeStore.getVolumeId(), store); volumeStore.setDestroyed(true); _volumeStoreDao.update(volumeStore.getId(), volumeStore); continue; @@ -2600,27 +2598,26 @@ public class VolumeServiceImpl implements VolumeService { if (volumeInfos.containsKey(volume.getId())) { TemplateProp volInfo = 
volumeInfos.remove(volume.getId()); toBeDownloaded.remove(volumeStore); - logger.info("Volume Sync found " + volume.getUuid() + " already in the volume image store table"); + logger.info("Volume Sync found {} already in the volume image store table", volume); if (volumeStore.getDownloadState() != Status.DOWNLOADED) { volumeStore.setErrorString(""); } if (volInfo.isCorrupted()) { volumeStore.setDownloadState(Status.DOWNLOAD_ERROR); - String msg = "Volume " + volume.getUuid() + " is corrupted on image store"; + String msg = String.format("Volume %s is corrupted on image store", volume); volumeStore.setErrorString(msg); logger.info(msg); if (volume.getState() == State.NotUploaded || volume.getState() == State.UploadInProgress) { - logger.info("Volume Sync found " + volume.getUuid() + " uploaded using SSVM on image store " + storeId + " as corrupted, marking it as failed"); + logger.info("Volume Sync found {} uploaded using SSVM on image store {} as corrupted, marking it as failed", volume, store); _volumeStoreDao.update(volumeStore.getId(), volumeStore); // mark volume as failed, so that storage GC will clean it up VolumeObject volObj = (VolumeObject)volFactory.getVolume(volume.getId()); volObj.processEvent(Event.OperationFailed); } else if (volumeStore.getDownloadUrl() == null) { - msg = "Volume (" + volume.getUuid() + ") with install path " + volInfo.getInstallPath() + " is corrupted, please check in image store: " - + volumeStore.getDataStoreId(); + msg = String.format("Volume (%s) with install path %s is corrupted, please check in image store: %s", volume, volInfo.getInstallPath(), store); logger.warn(msg); } else { - logger.info("Removing volume_store_ref entry for corrupted volume " + volume.getName()); + logger.info("Removing volume_store_ref entry for corrupted volume {}", volume); _volumeStoreDao.remove(volumeStore.getId()); toBeDownloaded.add(volumeStore); } @@ -2660,10 +2657,10 @@ public class VolumeServiceImpl implements VolumeService { } continue; } else 
if (volume.getState() == State.NotUploaded || volume.getState() == State.UploadInProgress) { // failed uploads through SSVM - logger.info("Volume Sync did not find " + volume.getUuid() + " uploaded using SSVM on image store " + storeId + ", marking it as failed"); + logger.info("Volume Sync did not find {} uploaded using SSVM on image store {}, marking it as failed", volume, store); toBeDownloaded.remove(volumeStore); volumeStore.setDownloadState(Status.DOWNLOAD_ERROR); - String msg = "Volume " + volume.getUuid() + " is corrupted on image store"; + String msg = String.format("Volume %s is corrupted on image store", volume); volumeStore.setErrorString(msg); _volumeStoreDao.update(volumeStore.getId(), volumeStore); // mark volume as failed, so that storage GC will clean it up @@ -2673,7 +2670,7 @@ public class VolumeServiceImpl implements VolumeService { } // Volume is not on secondary but we should download. if (volumeStore.getDownloadState() != Status.DOWNLOADED) { - logger.info("Volume Sync did not find " + volume.getName() + " ready on image store " + storeId + ", will request download to start/resume shortly"); + logger.info("Volume Sync did not find {} ready on image store {}, will request download to start/resume shortly", volume, store); } } @@ -2694,9 +2691,10 @@ public class VolumeServiceImpl implements VolumeService { } } - logger.debug("Volume " + volumeHost.getVolumeId() + " needs to be downloaded to " + store.getName()); // reset volume status back to Allocated VolumeObject vol = (VolumeObject)volFactory.getVolume(volumeHost.getVolumeId()); + logger.debug("Volume {} needs to be downloaded to {}", vol, store); + vol.processEvent(Event.OperationFailed); // reset back volume status // remove leftover volume_store_ref entry since re-download will create it again _volumeStoreDao.remove(volumeHost.getId()); @@ -2729,10 +2727,10 @@ public class VolumeServiceImpl implements VolumeService { answer = ep.sendMessage(dtCommand); } if (answer == null || 
!answer.getResult()) { - logger.info("Failed to deleted volume at store: " + store.getName()); + logger.info("Failed to delete volume at store: {}", store); } else { - String description = "Deleted volume " + tInfo.getTemplateName() + " on secondary storage " + storeId; + String description = String.format("Deleted volume %s on secondary storage %s", tInfo.getTemplateName(), store); logger.info(description); } } @@ -2740,7 +2738,7 @@ public class VolumeServiceImpl implements VolumeService { syncLock.unlock(); } } else { - logger.info("Couldn't get global lock on " + lockString + ", another thread may be doing volume sync on data store " + storeId + " now."); + logger.info("Couldn't get global lock on {}, another thread may be doing volume sync on data store {} now.", lockString, store); } } finally { syncLock.releaseRef(); @@ -2763,7 +2761,7 @@ public class VolumeServiceImpl implements VolumeService { return tanswer.getTemplateInfo(); } else { if (logger.isDebugEnabled()) { - logger.debug("Can not list volumes for image store " + store.getId()); + logger.debug("Can not list volumes for image store {}", store); } } @@ -2776,11 +2774,11 @@ public class VolumeServiceImpl implements VolumeService { try { snapshot = snapshotMgr.takeSnapshot(volume); } catch (CloudRuntimeException cre) { - logger.error("Take snapshot: " + volume.getId() + " failed", cre); + logger.error("Take snapshot: {} failed", volume, cre); throw cre; } catch (Exception e) { if (logger.isDebugEnabled()) { - logger.debug("unknown exception while taking snapshot for volume " + volume.getId() + " was caught", e); + logger.debug("unknown exception while taking snapshot for volume {} was caught", volume, e); } throw new CloudRuntimeException("Failed to take snapshot", e); } @@ -2793,7 +2791,7 @@ public class VolumeServiceImpl implements VolumeService { if (HypervisorType.KVM.equals(host.getHypervisorType()) && DataObjectType.VOLUME.equals(dataObject.getType())) { VolumeInfo volumeInfo = 
volFactory.getVolume(dataObject.getId()); if (VolumeApiServiceImpl.AllowCheckAndRepairVolume.valueIn(volumeInfo.getPoolId())) { - logger.info(String.format("Trying to check and repair the volume %d", dataObject.getId())); + logger.info("Trying to check and repair the volume {}", dataObject); String repair = CheckAndRepairVolumeCmd.RepairValues.LEAKS.name().toLowerCase(); CheckAndRepairVolumePayload payload = new CheckAndRepairVolumePayload(repair); volumeInfo.addPayload(payload); @@ -2904,9 +2902,8 @@ public class VolumeServiceImpl implements VolumeService { logger.debug(String.format("Volume [%s] is not present in the secondary storage. Therefore we do not need to move it in the secondary storage.", volume)); return; } - logger.debug(String.format("Volume [%s] is present in secondary storage. It will be necessary to move it from the source account's [%s] folder to the destination " - + "account's [%s] folder.", - volume.getUuid(), sourceAccount, destAccount)); + logger.debug("Volume [{}] is present in secondary storage. 
It will be necessary to move it from the source account's [{}] folder to the destination " + + "account's [{}] folder.", volume, sourceAccount, destAccount); VolumeInfo volumeInfo = volFactory.getVolume(volume.getId(), DataStoreRole.Image); String datastoreUri = volumeInfo.getDataStore().getUri(); @@ -2922,17 +2919,17 @@ public class VolumeServiceImpl implements VolumeService { if (!answer.getResult()) { String msg = String.format("Unable to move volume [%s] from [%s] (source account's [%s] folder) to [%s] (destination account's [%s] folder) in the secondary storage, due " + "to [%s].", - volume.getUuid(), srcPath.getParent(), sourceAccount, destPath, destAccount, answer.getDetails()); + volume, srcPath.getParent(), sourceAccount, destPath, destAccount, answer.getDetails()); logger.error(msg); throw new CloudRuntimeException(msg); } - logger.debug(String.format("Volume [%s] was moved from [%s] (source account's [%s] folder) to [%s] (destination account's [%s] folder) in the secondary storage.", - volume.getUuid(), srcPath.getParent(), sourceAccount, destPath, destAccount)); + logger.debug("Volume [{}] was moved from [{}] (source account's [{}] folder) to [{}] (destination account's [{}] folder) in the secondary storage.", + volume, srcPath.getParent(), sourceAccount, destPath, destAccount); volumeStore.setInstallPath(String.format("%s/%s", destPath, srcPath.getFileName().toString())); if (!_volumeStoreDao.update(volumeStore.getId(), volumeStore)) { - String msg = String.format("Unable to update volume [%s] install path in the DB.", volumeStore.getVolumeId()); + String msg = String.format("Unable to update volume [%s] install path in the DB.", volume); logger.error(msg); throw new CloudRuntimeException(msg); } diff --git a/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImplTest.java 
b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImplTest.java index 355eb075129..538ba1a1761 100644 --- a/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImplTest.java +++ b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImplTest.java @@ -100,7 +100,7 @@ public class BasePrimaryDataStoreLifeCycleImplTest { ReflectionTestUtils.setField(host, "id", HOST_ID); List hypervisorTypes = Arrays.asList(HypervisorType.KVM, HypervisorType.VMware); Mockito.when(resourceManager.listAllHostsInOneZoneNotInClusterByHypervisors(hypervisorTypes, ZONE_ID, CLUSTER_ID)).thenReturn(Arrays.asList(host)); - Mockito.when(storageManager.connectHostToSharedPool(HOST_ID, POOL_ID)).thenReturn(true); + Mockito.when(storageManager.connectHostToSharedPool(host, POOL_ID)).thenReturn(true); dataStoreLifeCycle.changeStoragePoolScopeToZone(store, clusterScope, null); diff --git a/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java index c4241dfbc3a..aa5ac3b9a76 100644 --- a/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java +++ b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java @@ -47,6 +47,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -85,6 +86,9 @@ public class VolumeServiceTest extends 
TestCase{ @Mock StorageManager storageManagerMock; + @Mock + PrimaryDataStoreDao primaryDataStoreDao; + @Mock VolumeVO volumeVoMock; @@ -105,6 +109,7 @@ public class VolumeServiceTest extends TestCase{ volumeServiceImplSpy.snapshotMgr = snapshotManagerMock; volumeServiceImplSpy._storageMgr = storageManagerMock; volumeServiceImplSpy._hostDao = hostDaoMock; + volumeServiceImplSpy.storagePoolDao = primaryDataStoreDao; volumeServiceImplSpy.diskOfferingDao = diskOfferingDaoMock; } @@ -220,6 +225,7 @@ public class VolumeServiceTest extends TestCase{ VolumeVO vo = new VolumeVO() {}; vo.setPoolType(Storage.StoragePoolType.Filesystem); volumeObject.configure(null, vo); + vo.setPoolId(1L); List exceptions = new ArrayList<>(Arrays.asList(new InterruptedException(), new ExecutionException() {})); diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java b/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java index 0ec566a4194..e26e32e7b2e 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java @@ -110,6 +110,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C // _msid is the unique persistent identifier that peer name is based upon // private Long _mshostId = null; + private ManagementServerHostVO _mshost = null; protected long _msId = ManagementServerNode.getManagementServerId(); protected long _runId = System.currentTimeMillis(); @@ -380,11 +381,11 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C } try { if (logger.isDebugEnabled()) { - logger.debug("Forwarding " + cmds + " to " + peer.getMsid()); + logger.debug("Forwarding {} to {}", cmds, peer); } executeAsync(peerName, agentId, cmds, true); } catch (final Exception e) { - logger.warn("Caught exception while talking to " + peer.getMsid()); + logger.warn("Caught exception while talking to {}", 
peer); } } } @@ -408,11 +409,11 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C final String peerName = Long.toString(peer.getMsid()); try { if (logger.isDebugEnabled()) { - logger.debug("Forwarding " + status + " to " + peer.getMsid()); + logger.debug("Forwarding {} to {}", status, peer); } sendStatus(peerName, status); } catch (final Exception e) { - String msg = String.format("Caught exception while talking to %d", peer.getMsid()); + String msg = String.format("Caught exception while talking to %s", peer); logger.warn(msg); logger.debug(msg, e); } @@ -503,7 +504,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C logger.debug("Notify management server node join to listeners."); for (final ManagementServerHostVO mshost : nodeList) { - logger.debug("Joining node, IP: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid()); + logger.debug("Joining node, IP: {}, ms: {}", mshost.getServiceIP(), mshost); } } @@ -523,7 +524,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C for (final ManagementServerHostVO mshost : nodeList) { if (logger.isDebugEnabled()) { - logger.debug("Leaving node, IP: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid()); + logger.debug("Leaving node, IP: {}, ms: {}", mshost.getServiceIP(), mshost); } cancelClusterRequestToPeer(String.valueOf(mshost.getMsid())); } @@ -595,7 +596,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C profilerHeartbeatUpdate.start(); txn.transitToAutoManagedConnection(TransactionLegacy.CLOUD_DB); if (logger.isTraceEnabled()) { - logger.trace("Cluster manager heartbeat update, id:" + _mshostId); + logger.trace("Cluster manager heartbeat update, id: {}, mshost: {}", _mshostId, _mshost); } _mshostDao.update(_mshostId, _runId, DateUtil.currentGMTTime()); @@ -603,7 +604,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C 
profilerPeerScan.start(); if (logger.isTraceEnabled()) { - logger.trace("Cluster manager peer-scan, id:" + _mshostId); + logger.trace("Cluster manager peer-scan, id: {}, mshost: {}", _mshostId, _mshost); } if (!_peerScanInited) { @@ -811,8 +812,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C if (logger.isInfoEnabled()) { logger.info("Found " + inactiveList.size() + " inactive management server node based on timestamp"); for (final ManagementServerHostVO host : inactiveList) { - logger.info("management server node msid: " + host.getMsid() + ", name: " + host.getName() + ", service ip: " + host.getServiceIP() + - ", version: " + host.getVersion()); + logger.info("management server node ms: {}, service ip: {}, version: {}", host, host.getServiceIP(), host.getVersion()); } } @@ -820,7 +820,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C for (final ManagementServerHostVO host : inactiveList) { // Check if peer state is Up in the period if (!_mshostPeerDao.isPeerUpState(_mshostId, host.getId(), new Date(cutTime.getTime() - HeartbeatThreshold.value()))) { - logger.warn("Management node " + host.getId() + " is detected inactive by timestamp and did not send node status to this node"); + logger.warn("Management node {} is detected inactive by timestamp and did not send node status to this node", host); downHostList.add(host); } } @@ -865,7 +865,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C if (current == null) { if (entry.getKey().longValue() != _mshostId.longValue()) { if (logger.isDebugEnabled()) { - logger.debug("Detected management node left, id:" + entry.getKey() + ", nodeIP:" + entry.getValue().getServiceIP()); + logger.debug("Detected management node left {}, nodeIP:{}", + entry.getValue(), entry.getValue().getServiceIP()); } removedNodeList.add(entry.getValue()); } @@ -873,15 +874,16 @@ public class ClusterManagerImpl extends ManagerBase implements 
ClusterManager, C if (current.getRunid() == 0) { if (entry.getKey().longValue() != _mshostId.longValue()) { if (logger.isDebugEnabled()) { - logger.debug("Detected management node left because of invalidated session, id:" + entry.getKey() + ", nodeIP:" + - entry.getValue().getServiceIP()); + logger.debug("Detected management node left because of invalidated session {}, nodeIP:{}", + entry.getValue(), entry.getValue().getServiceIP()); } invalidatedNodeList.add(entry.getValue()); } } else { if (entry.getValue().getRunid() != current.getRunid()) { if (logger.isDebugEnabled()) { - logger.debug("Detected management node left and rejoined quickly, id:" + entry.getKey() + ", nodeIP:" + entry.getValue().getServiceIP()); + logger.debug("Detected management node left and rejoined quickly {}, nodeIP:{}", + entry.getValue(), entry.getValue().getServiceIP()); } entry.getValue().setRunid(current.getRunid()); @@ -954,7 +956,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C final ManagementServerHostVO mshost = it.next(); // Check if peer state is Up in the period if (!_mshostPeerDao.isPeerUpState(_mshostId, mshost.getId(), new Date(cutTime.getTime() - HeartbeatThreshold.value()))) { - logger.warn("Management node " + mshost.getId() + " is detected inactive by timestamp and did not send node status to this node"); + logger.warn("Management node {} is detected inactive by timestamp and did not send node status to this node", mshost); _activePeers.remove(mshost.getId()); try { JmxUtil.unregisterMBean("ClusterManager", "Node " + mshost.getId()); @@ -962,7 +964,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C logger.warn("Unable to deregiester cluster node from JMX monitoring due to exception " + e.toString()); } } else { - logger.info("Management node " + mshost.getId() + " is detected inactive by timestamp but sent node status to this node"); + logger.info("Management node {} is detected inactive by timestamp 
but sent node status to this node", mshost); it.remove(); } } @@ -979,7 +981,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C _activePeers.put(mshost.getId(), mshost); if (logger.isDebugEnabled()) { - logger.debug("Detected management node joined, id:" + mshost.getId() + ", nodeIP:" + mshost.getServiceIP()); + logger.debug("Detected management node joined, {}, nodeIP:{}", mshost, mshost.getServiceIP()); } newNodeList.add(mshost); @@ -1032,7 +1034,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C @DB public boolean start() { if (logger.isInfoEnabled()) { - logger.info("Starting Cluster manager, msid : " + _msId); + logger.info("Starting Cluster manager, msid: {}, mshost: {}", _msId, _mshost); } final ManagementServerHostVO mshost = Transaction.execute(new TransactionCallback() { @@ -1058,13 +1060,13 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C mshost.setUuid(UUID.randomUUID().toString()); _mshostDao.persist(mshost); if (logger.isInfoEnabled()) { - logger.info("New instance of management server msid " + _msId + ", runId " + _runId + " is being started"); + logger.info("New instance of management server {}, runId {} is being started", mshost, _runId); } } else { _mshostDao.update(mshost.getId(), _runId, NetUtils.getCanonicalHostName(), version, _clusterNodeIP, _currentServiceAdapter.getServicePort(), DateUtil.currentGMTTime()); if (logger.isInfoEnabled()) { - logger.info("Management server " + _msId + ", runId " + _runId + " is being started"); + logger.info("Management server {}, runId {} is being started", mshost, _runId); } } @@ -1072,9 +1074,10 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C } }); + _mshost = mshost; _mshostId = mshost.getId(); if (logger.isInfoEnabled()) { - logger.info("Management server (host id : " + _mshostId + ") is being started at " + _clusterNodeIP + ":" + 
_currentServiceAdapter.getServicePort()); + logger.info("Management server (host : {}) is being started at {}:{}", _mshost, _clusterNodeIP, _currentServiceAdapter.getServicePort()); } _mshostPeerDao.clearPeerInfo(_mshostId); @@ -1094,7 +1097,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C @DB public boolean stop() { if (logger.isInfoEnabled()) { - logger.info("Stopping Cluster manager, msid : " + _msId); + logger.info("Stopping Cluster manager, msid : {}, runId : {}, host : {}", _msId, _runId, _mshost); } if (_mshostId != null) { diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ManagementServerHostVO.java b/framework/cluster/src/main/java/com/cloud/cluster/ManagementServerHostVO.java index 2918ccd22d7..6c3b2a93994 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ManagementServerHostVO.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/ManagementServerHostVO.java @@ -32,6 +32,7 @@ import javax.persistence.TemporalType; import org.apache.cloudstack.management.ManagementServerHost; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "mshost") @@ -199,7 +200,9 @@ public class ManagementServerHostVO implements ManagementServerHost { @Override public String toString() { - return new StringBuilder("ManagementServer[").append("-").append(id).append("-").append(msid).append("-").append(state).append("]").toString(); + return String.format("ManagementServer %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "msid")); } @Override diff --git a/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java b/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java index 82fea9749ff..c7f2daadc51 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java +++ b/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java 
@@ -1051,6 +1051,10 @@ public abstract class GenericDaoBase extends Compone } protected T findById(ID id, boolean removed, Boolean lock) { + if (id == null) { + return null; + } + StringBuilder sql = new StringBuilder(_selectByIdSql); if (!removed && _removed != null) { sql.append(" AND ").append(_removed.first()); diff --git a/framework/events/src/main/java/org/apache/cloudstack/framework/events/Event.java b/framework/events/src/main/java/org/apache/cloudstack/framework/events/Event.java index 7a14f385fa1..c293de8b4dd 100644 --- a/framework/events/src/main/java/org/apache/cloudstack/framework/events/Event.java +++ b/framework/events/src/main/java/org/apache/cloudstack/framework/events/Event.java @@ -21,6 +21,7 @@ package org.apache.cloudstack.framework.events; import com.google.gson.Gson; import com.google.gson.annotations.Expose; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class Event { @@ -49,6 +50,13 @@ public class Event { setResourceUUID(resourceUUID); } + @Override + public String toString() { + return String.format("Event %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "eventId", "eventUuid", "eventType", "resourceType", "resourceUUID", "description")); + } + public Long getEventId() { return eventId; } diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobVO.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobVO.java index 6b85ae27f58..0f2c8d1736a 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobVO.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobVO.java @@ -40,6 +40,7 @@ import org.apache.cloudstack.jobs.JobInfo; import com.cloud.utils.UuidUtils; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "async_job") 
@@ -384,26 +385,10 @@ public class AsyncJobVO implements AsyncJob, JobInfo { @Override public String toString() { - StringBuffer sb = new StringBuffer(); - sb.append("AsyncJobVO: {id:").append(getId()); - sb.append(", userId: ").append(getUserId()); - sb.append(", accountId: ").append(getAccountId()); - sb.append(", instanceType: ").append(getInstanceType()); - sb.append(", instanceId: ").append(getInstanceId()); - sb.append(", cmd: ").append(getCmd()); - sb.append(", cmdInfo: ").append(getCmdInfo()); - sb.append(", cmdVersion: ").append(getCmdVersion()); - sb.append(", status: ").append(getStatus()); - sb.append(", processStatus: ").append(getProcessStatus()); - sb.append(", resultCode: ").append(getResultCode()); - sb.append(", result: ").append(getResult()); - sb.append(", initMsid: ").append(getInitMsid()); - sb.append(", completeMsid: ").append(getCompleteMsid()); - sb.append(", lastUpdated: ").append(getLastUpdated()); - sb.append(", lastPolled: ").append(getLastPolled()); - sb.append(", created: ").append(getCreated()); - sb.append(", removed: ").append(getRemoved()); - sb.append("}"); - return sb.toString(); + return String.format("AsyncJob %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", + "userId", "accountId", "instanceType", "instanceId", "cmd", "cmdInfo", + "cmdVersion", "status", "processStatus", "resultCode", "result", "initMsid", + "completeMsid", "lastUpdated", "lastPolled", "created", "removed")); } } diff --git a/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java b/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java index ec6674477b0..0ed658aa70d 100644 --- a/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java +++ 
b/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java @@ -95,7 +95,7 @@ public class ExplicitDedicationProcessor extends AffinityProcessorBase implement for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) { if (vmGroupMapping != null) { if (logger.isDebugEnabled()) { - logger.debug("Processing affinity group " + vmGroupMapping.getAffinityGroupId() + "of type 'ExplicitDedication' for VM Id: " + vm.getId()); + logger.debug("Processing affinity group {} of type 'ExplicitDedication' for VM: {}", _affinityGroupDao.findById(vmGroupMapping.getAffinityGroupId()), vm); } long affinityGroupId = vmGroupMapping.getAffinityGroupId(); @@ -304,7 +304,7 @@ public class ExplicitDedicationProcessor extends AffinityProcessorBase implement DedicatedResourceVO dPod = _dedicatedDao.findByPodId(pod.getId()); if (dPod != null && !dedicatedResources.contains(dPod)) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Avoiding POD %s [%s] because it is not dedicated.", pod.getName(), pod.getUuid())); + logger.debug(String.format("Avoiding POD %s because it is not dedicated.", pod)); } avoidList.addPod(pod.getId()); } else { @@ -345,7 +345,7 @@ public class ExplicitDedicationProcessor extends AffinityProcessorBase implement for (HostPodVO pod : pods) { if (podsInIncludeList != null && !podsInIncludeList.contains(pod.getId())) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Avoiding POD %s [%s], as it is not in include list.", pod.getName(), pod.getUuid())); + logger.debug(String.format("Avoiding POD %s, as it is not in include list.", pod)); } avoidList.addPod(pod.getId()); } diff --git a/plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java b/plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java index b94cf49e4d9..b97b8e224ad 100644 --- 
a/plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java +++ b/plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java @@ -80,7 +80,7 @@ public class HostAffinityProcessor extends AffinityProcessorBase implements Affi */ protected void processAffinityGroup(AffinityGroupVMMapVO vmGroupMapping, DeploymentPlan plan, VirtualMachine vm, List vmList) { AffinityGroupVO group = _affinityGroupDao.findById(vmGroupMapping.getAffinityGroupId()); - logger.debug("Processing affinity group " + group.getName() + " for VM Id: " + vm.getId()); + logger.debug("Processing affinity group {} for VM {}", group, vm); List groupVMIds = _affinityGroupVMMapDao.listVmIdsByAffinityGroup(group.getId()); groupVMIds.remove(vm.getId()); diff --git a/plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java b/plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java index 4681ce4321e..bd29a48f258 100644 --- a/plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java +++ b/plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java @@ -94,7 +94,7 @@ public class HostAntiAffinityProcessor extends AffinityProcessorBase implements AffinityGroupVO group = _affinityGroupDao.findById(vmGroupMapping.getAffinityGroupId()); if (logger.isDebugEnabled()) { - logger.debug("Processing affinity group " + group.getName() + " for VM Id: " + vm.getId()); + logger.debug(String.format("Processing affinity group %s for VM %s", group, vm)); } List groupVMIds = _affinityGroupVMMapDao.listVmIdsByAffinityGroup(group.getId()); @@ -106,7 +106,7 @@ public class HostAntiAffinityProcessor extends AffinityProcessorBase 
implements if (groupVM.getHostId() != null) { avoid.addHost(groupVM.getHostId()); if (logger.isDebugEnabled()) { - logger.debug("Added host " + groupVM.getHostId() + " to avoid set, since VM " + groupVM.getId() + " is present on the host"); + logger.debug("Added host {} to avoid set, since VM {} is present on the host", groupVM.getHostId(), groupVM); } } } else if (Arrays.asList(VirtualMachine.State.Starting, VirtualMachine.State.Stopped).contains(groupVM.getState()) && groupVM.getLastHostId() != null) { @@ -114,8 +114,7 @@ public class HostAntiAffinityProcessor extends AffinityProcessorBase implements if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) { avoid.addHost(groupVM.getLastHostId()); if (logger.isDebugEnabled()) { - logger.debug("Added host " + groupVM.getLastHostId() + " to avoid set, since VM " + groupVM.getId() + - " is present on the host, in Stopped state but has reserved capacity"); + logger.debug("Added host {} to avoid set, since VM {} is present on the host, in Stopped state but has reserved capacity", groupVM.getLastHostId(), groupVM); } } } @@ -155,8 +154,7 @@ public class HostAntiAffinityProcessor extends AffinityProcessorBase implements VMReservationVO vmReservation = _reservationDao.findByVmId(groupVMId); if (vmReservation != null && vmReservation.getHostId() != null && vmReservation.getHostId().equals(plannedHostId)) { if (logger.isDebugEnabled()) { - logger.debug("Planned destination for VM " + vm.getId() + " conflicts with an existing VM " + vmReservation.getVmId() + - " reserved on the same host " + plannedHostId); + logger.debug(String.format("Planned destination for VM %s conflicts with an existing VM %d reserved on the same host %s", vm, vmReservation.getVmId(), plannedDestination.getHost())); } return false; } diff --git a/plugins/affinity-group-processors/non-strict-host-affinity/src/main/java/org/apache/cloudstack/affinity/NonStrictHostAffinityProcessor.java 
b/plugins/affinity-group-processors/non-strict-host-affinity/src/main/java/org/apache/cloudstack/affinity/NonStrictHostAffinityProcessor.java index f227a3ffc8d..49e3f60ed5d 100644 --- a/plugins/affinity-group-processors/non-strict-host-affinity/src/main/java/org/apache/cloudstack/affinity/NonStrictHostAffinityProcessor.java +++ b/plugins/affinity-group-processors/non-strict-host-affinity/src/main/java/org/apache/cloudstack/affinity/NonStrictHostAffinityProcessor.java @@ -77,7 +77,7 @@ public class NonStrictHostAffinityProcessor extends AffinityProcessorBase implem AffinityGroupVO group = affinityGroupDao.findById(vmGroupMapping.getAffinityGroupId()); if (logger.isDebugEnabled()) { - logger.debug("Processing affinity group " + group.getName() + " for VM Id: " + vm.getId()); + logger.debug(String.format("Processing affinity group %s for VM: %s", group, vm)); } List groupVMIds = affinityGroupVMMapDao.listVmIdsByAffinityGroup(group.getId()); @@ -95,17 +95,17 @@ public class NonStrictHostAffinityProcessor extends AffinityProcessorBase implem if (groupVM.getHostId() != null) { Integer priority = adjustHostPriority(plan, groupVM.getHostId()); if (logger.isDebugEnabled()) { - logger.debug(String.format("Updated host %s priority to %s , since VM %s is present on the host", - groupVM.getHostId(), priority, groupVM.getId())); + logger.debug(String.format("Updated host %s priority to %s, since VM %s is present on the host", + groupVM.getHostId(), priority, groupVM)); } } else if (Arrays.asList(VirtualMachine.State.Starting, VirtualMachine.State.Stopped).contains(groupVM.getState()) && groupVM.getLastHostId() != null) { long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - groupVM.getUpdateTime().getTime()) / 1000; if (secondsSinceLastUpdate < vmCapacityReleaseInterval) { Integer priority = adjustHostPriority(plan, groupVM.getLastHostId()); if (logger.isDebugEnabled()) { - logger.debug(String.format("Updated host %s priority to %s , since VM %s" + + 
logger.debug(String.format("Updated host %s priority to %s, since VM %s" + " is present on the host, in %s state but has reserved capacity", - groupVM.getLastHostId(), priority, groupVM.getId(), groupVM.getState())); + groupVM.getLastHostId(), priority, groupVM, groupVM.getState())); } } } diff --git a/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/sioc/SiocManagerImpl.java b/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/sioc/SiocManagerImpl.java index c87ff3dfc82..e93b8df39e9 100644 --- a/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/sioc/SiocManagerImpl.java +++ b/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/sioc/SiocManagerImpl.java @@ -98,11 +98,11 @@ public class SiocManagerImpl implements SiocManager { } if (storagePool.getDataCenterId() != zoneId) { - throw new Exception("Error: Storage pool '" + storagePool.getName() + "' is not in zone ID " + zoneId + "."); + throw new Exception(String.format("Error: Storage pool %s is not in zone %s.", storagePool, zone)); } if (!storagePool.getPoolType().equals(StoragePoolType.VMFS)) { - throw new Exception("Error: Storage pool '" + storagePool.getName() + "' does not represent a VMFS datastore."); + throw new Exception(String.format("Error: Storage pool %s does not represent a VMFS datastore.", storagePool)); } String lockName = zone.getUuid() + "-" + storagePool.getUuid(); @@ -193,7 +193,7 @@ public class SiocManagerImpl implements SiocManager { ManagedObjectReference morVm = nameToVm.get(vmName); if (morVm == null) { - String errMsg = "Error: The VM with ID " + instanceId + " could not be located (ManagedObjectReference)."; + String errMsg = String.format("Error: The VM %s could not be located (ManagedObjectReference).", vmInstance); throw new Exception(errMsg); } @@ -336,7 +336,7 @@ public class SiocManagerImpl implements SiocManager { } private String getInfoMsg(Volume volume, Integer newShares, Long newLimitIops) { - String msgPrefix = "VMware SIOC: Volume = " 
+ volume.getName(); + String msgPrefix = String.format("VMware SIOC: Volume %s", volume); String msgNewShares = newShares != null ? "; New Shares = " + newShares : ""; @@ -354,8 +354,7 @@ public class SiocManagerImpl implements SiocManager { List volumes = volumeDao.findByInstance(vmInstance.getId()); if (volumes == null || volumes.size() == 0) { - String errMsg = "Error: The VMware virtual disk '" + disk + "' could not be mapped to a CloudStack volume. " + - "There were no volumes for the VM with the following ID: " + vmInstance.getId() + "."; + String errMsg = String.format("Error: The VMware virtual disk '%s' could not be mapped to a CloudStack volume. There were no volumes for the VM: %s.", disk, vmInstance); throw new Exception(errMsg); } diff --git a/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java b/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java index f162c51a703..d4b3cff0f5c 100644 --- a/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java +++ b/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java @@ -67,20 +67,20 @@ public class DummyBackupProvider extends AdapterBase implements BackupProvider { @Override public boolean assignVMToBackupOffering(VirtualMachine vm, BackupOffering backupOffering) { - logger.debug("Creating VM backup for VM " + vm.getInstanceName() + " from backup offering " + backupOffering.getName()); + logger.debug("Creating VM backup for VM {} from backup offering {}", vm, backupOffering); ((VMInstanceVO) vm).setBackupExternalId("dummy-external-backup-id"); return true; } @Override public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) { - logger.debug("Restoring vm " + vm.getUuid() + "from backup " + backup.getUuid() + " on the Dummy Backup Provider"); + logger.debug("Restoring vm {} from backup {} on the Dummy Backup Provider", vm, backup); return true; } @Override public 
Pair restoreBackedUpVolume(Backup backup, String volumeUuid, String hostIp, String dataStoreUuid, Pair vmNameAndState) { - logger.debug("Restoring volume " + volumeUuid + "from backup " + backup.getUuid() + " on the Dummy Backup Provider"); + logger.debug("Restoring volume {} from backup {} on the Dummy Backup Provider", volumeUuid, backup); throw new CloudRuntimeException("Dummy plugin does not support this feature"); } @@ -101,7 +101,7 @@ public class DummyBackupProvider extends AdapterBase implements BackupProvider { @Override public boolean removeVMFromBackupOffering(VirtualMachine vm) { - logger.debug("Removing VM ID " + vm.getUuid() + " from backup offering by the Dummy Backup Provider"); + logger.debug(String.format("Removing VM %s from backup offering by the Dummy Backup Provider", vm)); return true; } @@ -112,7 +112,7 @@ public class DummyBackupProvider extends AdapterBase implements BackupProvider { @Override public boolean takeBackup(VirtualMachine vm) { - logger.debug("Starting backup for VM ID " + vm.getUuid() + " on Dummy provider"); + logger.debug(String.format("Starting backup for VM %s on Dummy provider", vm)); BackupVO backup = new BackupVO(); backup.setVmId(vm.getId()); diff --git a/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java b/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java index 4a6725abdca..5d3d1a91933 100644 --- a/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java +++ b/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java @@ -107,7 +107,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co // Try to find any Up host in the same cluster for (final Host hostInCluster : hostDao.findHypervisorHostInCluster(host.getClusterId())) { if (hostInCluster.getStatus() == Status.Up) { - LOG.debug("Found Host " + hostInCluster.getName()); + LOG.debug("Found Host {}", hostInCluster); 
return hostInCluster; } } @@ -115,7 +115,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co // Try to find any Host in the zone for (final HostVO hostInZone : hostDao.listByDataCenterIdAndHypervisorType(host.getDataCenterId(), Hypervisor.HypervisorType.KVM)) { if (hostInZone.getStatus() == Status.Up) { - LOG.debug("Found Host " + hostInZone.getName()); + LOG.debug("Found Host {}", hostInZone); return hostInZone; } } @@ -213,7 +213,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co List backedVolumes = backup.getBackedUpVolumes(); List volumes = backedVolumes.stream().map(volume -> volumeDao.findByUuid(volume.getUuid())).collect(Collectors.toList()); - LOG.debug("Restoring vm {} from backup {} on the NAS Backup Provider", vm.getUuid(), backup.getUuid()); + LOG.debug("Restoring vm {} from backup {} on the NAS Backup Provider", vm, backup); BackupRepository backupRepository = getBackupRepository(vm, backup); final Host host = getLastVMHypervisorHost(vm); @@ -263,7 +263,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co Optional matchingVolume = getBackedUpVolumeInfo(backupSourceVm.getBackupVolumeList(), volumeUuid); Long backedUpVolumeSize = matchingVolume.isPresent() ? 
matchingVolume.get().getSize() : 0L; - LOG.debug("Restoring vm volume" + volumeUuid + "from backup " + backup.getUuid() + " on the NAS Backup Provider"); + LOG.debug("Restoring vm volume {} from backup {} on the NAS Backup Provider", volume, backup); BackupRepository backupRepository = getBackupRepository(backupSourceVm, backup); VolumeVO restoredVolume = new VolumeVO(Volume.Type.DATADISK, null, backup.getZoneId(), @@ -377,8 +377,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co vmBackupProtectedSize += backup.getProtectedSize(); } Backup.Metric vmBackupMetric = new Backup.Metric(vmBackupSize,vmBackupProtectedSize); - LOG.debug(String.format("Metrics for VM [uuid: %s, name: %s] is [backup size: %s, data size: %s].", vm.getUuid(), - vm.getInstanceName(), vmBackupMetric.getBackupSize(), vmBackupMetric.getDataSize())); + LOG.debug("Metrics for VM {} is [backup size: {}, data size: {}].", vm, vmBackupMetric.getBackupSize(), vmBackupMetric.getDataSize()); metrics.put(vm, vmBackupMetric); } return metrics; diff --git a/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java b/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java index 0e87ad33887..393e2911ac3 100644 --- a/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java +++ b/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java @@ -173,7 +173,7 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid List altClusterHosts = hostDao.findHypervisorHostInCluster(host.getClusterId()); for (final HostVO candidateClusterHost : altClusterHosts) { if ( candidateClusterHost.getStatus() == Status.Up ) { - LOG.debug("Found Host " + candidateClusterHost.getName()); + LOG.debug(String.format("Found Host %s", candidateClusterHost)); return candidateClusterHost; } } @@ -182,7 +182,7 @@ public class 
NetworkerBackupProvider extends AdapterBase implements BackupProvid List altZoneHosts = hostDao.findByDataCenterId(host.getDataCenterId()); for (final HostVO candidateZoneHost : altZoneHosts) { if ( candidateZoneHost.getStatus() == Status.Up && candidateZoneHost.getHypervisorType() == Hypervisor.HypervisorType.KVM ) { - LOG.debug("Found Host " + candidateZoneHost.getName()); + LOG.debug("Found Host " + candidateZoneHost); return candidateZoneHost; } } @@ -331,7 +331,7 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid final NetworkerBackup networkerBackup=getClient(zoneId).getNetworkerBackupInfo(externalBackupId); final String SSID = networkerBackup.getShortId(); - LOG.debug("Restoring vm " + vm.getUuid() + "from backup " + backup.getUuid() + " on the Networker Backup Provider"); + LOG.debug(String.format("Restoring vm %s from backup %s on the Networker Backup Provider", vm, backup)); if ( SSID.isEmpty() ) { LOG.debug("There was an error retrieving the SSID for backup with id " + externalBackupId + " from EMC NEtworker"); @@ -359,7 +359,7 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid script.add("-v"); Date restoreJobStart = new Date(); - LOG.debug("Starting Restore for VM ID " + vm.getUuid() + " and SSID" + SSID + " at " + restoreJobStart); + LOG.debug(String.format("Starting Restore for VM %s and SSID %s at %s", vm, SSID, restoreJobStart)); if ( executeRestoreCommand(hostVO, credentials.first(), credentials.second(), script.toString()) ) { Date restoreJobEnd = new Date(); @@ -387,7 +387,7 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid final String destinationNetworkerClient = hostVO.getName().split("\\.")[0]; Long restoredVolumeDiskSize = 0L; - LOG.debug("Restoring volume " + volumeUuid + "from backup " + backup.getUuid() + " on the Networker Backup Provider"); + LOG.debug(String.format("Restoring volume %s with uuid %s from backup %s on the Networker Backup
Provider", volume, volumeUuid, backup)); if ( SSID.isEmpty() ) { LOG.debug("There was an error retrieving the SSID for backup with id " + externalBackupId + " from EMC NEtworker"); @@ -448,7 +448,7 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid script.add("-v"); Date restoreJobStart = new Date(); - LOG.debug("Starting Restore for Volume UUID " + volume.getUuid() + " and SSID" + SSID + " at " + restoreJobStart); + LOG.debug(String.format("Starting Restore for Volume %s and SSID %s at %s", volume, SSID, restoreJobStart)); if ( executeRestoreCommand(hostVO, credentials.first(), credentials.second(), script.toString()) ) { Date restoreJobEnd = new Date(); @@ -505,18 +505,18 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid if ( Boolean.TRUE.equals(NetworkerClientVerboseLogs.value()) ) script.add("-v"); - LOG.debug("Starting backup for VM ID " + vm.getUuid() + " on Networker provider"); + LOG.debug("Starting backup for VM {} on Networker provider", vm); Date backupJobStart = new Date(); String saveTime = executeBackupCommand(hostVO, credentials.first(), credentials.second(), script.toString()); - LOG.info ("EMC Networker finished backup job for vm " + vm.getName() + " with saveset Time: " + saveTime); + LOG.info("EMC Networker finished backup job for vm {} with saveset Time: {}", vm, saveTime); BackupVO backup = getClient(vm.getDataCenterId()).registerBackupForVm(vm, backupJobStart, saveTime); if (backup != null) { backup.setBackedUpVolumes(BackupManagerImpl.createVolumeInfoFromVolumes(volumeDao.findByInstance(vm.getId()))); backupDao.persist(backup); return true; } else { - LOG.error("Could not register backup for vm " + vm.getName() + " with saveset Time: " + saveTime); + LOG.error("Could not register backup for vm {} with saveset Time: {}", vm, saveTime); // We need to handle this rare situation where backup is successful but can't be registered properly.
return false; } @@ -558,8 +558,7 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid vmBackupProtectedSize+= vmNwBackup.getSize().getValue() / 1024L; } Backup.Metric vmBackupMetric = new Backup.Metric(vmBackupSize,vmBackupProtectedSize); - LOG.debug(String.format("Metrics for VM [uuid: %s, name: %s] is [backup size: %s, data size: %s].", vm.getUuid(), - vm.getInstanceName(), vmBackupMetric.getBackupSize(), vmBackupMetric.getDataSize())); + LOG.debug(String.format("Metrics for VM [%s] is [backup size: %s, data size: %s].", vm, vmBackupMetric.getBackupSize(), vmBackupMetric.getDataSize())); metrics.put(vm, vmBackupMetric); } return metrics; @@ -578,14 +577,14 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid Long vmBackupSize=0L; boolean backupExists = false; for (final Backup backupInDb : backupsInDb) { - LOG.debug("Checking if Backup with external ID " + backupInDb.getName() + " for VM " + backupInDb.getVmId() + "is valid"); + LOG.debug(String.format("Checking if Backup %s with external ID %s for VM %s is valid", backupInDb, backupInDb.getName(), vm)); if ( networkerBackupId.equals(backupInDb.getExternalId()) ) { - LOG.debug("Found Backup with id " + backupInDb.getId() + " in both Database and Networker"); + LOG.debug(String.format("Found Backup %s in both Database and Networker", backupInDb)); backupExists = true; removeList.remove(backupInDb.getId()); if (metric != null) { - LOG.debug(String.format("Update backup with [uuid: %s, external id: %s] from [size: %s, protected size: %s] to [size: %s, protected size: %s].", - backupInDb.getUuid(), backupInDb.getExternalId(), backupInDb.getSize(), backupInDb.getProtectedSize(), + LOG.debug(String.format("Update backup [%s] from [size: %s, protected size: %s] to [size: %s, protected size: %s].", + backupInDb, backupInDb.getSize(), backupInDb.getProtectedSize(), metric.getBackupSize(), metric.getDataSize())); ((BackupVO)
backupInDb).setSize(metric.getBackupSize()); ((BackupVO) backupInDb).setProtectedSize(metric.getDataSize()); @@ -627,12 +626,12 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid strayBackup.setAccountId(vm.getAccountId()); strayBackup.setDomainId(vm.getDomainId()); strayBackup.setZoneId(vm.getDataCenterId()); - LOG.debug(String.format("Creating a new entry in backups: [uuid: %s, vm_id: %s, external_id: %s, type: %s, date: %s, backup_offering_id: %s, account_id: %s, " - + "domain_id: %s, zone_id: %s].", strayBackup.getUuid(), strayBackup.getVmId(), strayBackup.getExternalId(), + LOG.debug(String.format("Creating a new entry in backups: [id: %s, uuid: %s, vm_id: %s, external_id: %s, type: %s, date: %s, backup_offering_id: %s, account_id: %s, " + + "domain_id: %s, zone_id: %s].", strayBackup.getId(), strayBackup.getUuid(), strayBackup.getVmId(), strayBackup.getExternalId(), strayBackup.getType(), strayBackup.getDate(), strayBackup.getBackupOfferingId(), strayBackup.getAccountId(), strayBackup.getDomainId(), strayBackup.getZoneId())); backupDao.persist(strayBackup); - LOG.warn("Added backup found in provider with ID: [" + strayBackup.getId() + "]"); + LOG.warn("Added backup found in provider [" + strayBackup + "]"); } else { LOG.debug ("Backup is in progress, skipping addition for this run"); } diff --git a/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/networker/NetworkerClient.java b/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/networker/NetworkerClient.java index 8aecaa26023..36bfd456475 100644 --- a/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/networker/NetworkerClient.java +++ b/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/networker/NetworkerClient.java @@ -298,7 +298,7 @@ public class NetworkerClient { public ArrayList getBackupsForVm(VirtualMachine vm) { SimpleDateFormat formatterDateTime = new SimpleDateFormat("yyy-MM-dd'T'HH:mm:ss"); - 
LOG.debug("Trying to list EMC Networker backups for VM " + vm.getName()); + LOG.debug(String.format("Trying to list EMC Networker backups for VM %s", vm)); try { final HttpResponse response = get("/global/backups/?q=name:" + vm.getName()); checkResponseOK(response); @@ -310,7 +310,7 @@ public class NetworkerClient { return backupsTaken; } for (final NetworkerBackup backup : networkerBackups.getBackups()) { - LOG.debug("Found Backup " + backup.getId()); + LOG.debug(String.format("Found Backup %s", backup)); // Backups that have expired on the EMC Networker but not removed yet will not be added try { Date backupRetentionTime = formatterDateTime.parse(backup.getRetentionTime()); @@ -345,7 +345,7 @@ public class NetworkerClient { return policies; } for (final ProtectionPolicy protectionPolicy : protectionPolicies.getProtectionPolicies()) { - LOG.debug("Found Protection Policy:" + protectionPolicy.getName()); + LOG.debug(String.format("Found Protection Policy: %s", protectionPolicy)); policies.add(new NetworkerBackupOffering(protectionPolicy.getName(), protectionPolicy.getResourceId().getId())); } return policies; diff --git a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java index 4750e3264aa..c120d8bd599 100644 --- a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java +++ b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java @@ -173,7 +173,7 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, final String clonedJobName = getGuestBackupName(vm.getInstanceName(), vm.getUuid()); if (!client.cloneVeeamJob(parentJob, clonedJobName)) { - logger.error("Failed to clone pre-defined Veeam job (backup offering) for backup offering ID: " + backupOffering.getExternalId() + " but will check the list of jobs again if it was eventually succeeded."); + 
logger.error("Failed to clone pre-defined Veeam job (backup offering) for backup offering [id: {}, name: {}] but will check the list of jobs again if it was eventually succeeded.", backupOffering.getExternalId(), backupOffering.getName()); } for (final BackupOffering job : client.listJobs()) { @@ -182,7 +182,7 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, if (BooleanUtils.isTrue(clonedJob.getScheduleConfigured()) && !clonedJob.getScheduleEnabled()) { client.toggleJobSchedule(clonedJob.getId()); } - logger.debug("Veeam job (backup offering) for backup offering ID: " + backupOffering.getExternalId() + " found, now trying to assign the VM to the job."); + logger.debug("Veeam job (backup offering) for backup offering [id: {}, name: {}] found, now trying to assign the VM to the job.", backupOffering.getExternalId(), backupOffering.getName()); final VmwareDatacenter vmwareDC = findVmwareDatacenterForVM(vm); if (client.addVMToVeeamJob(job.getExternalId(), vm.getInstanceName(), vmwareDC.getVcenterHost())) { ((VMInstanceVO) vm).setBackupExternalId(job.getExternalId()); @@ -229,7 +229,7 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, public boolean deleteBackup(Backup backup, boolean forced) { VMInstanceVO vm = vmInstanceDao.findByIdIncludingRemoved(backup.getVmId()); if (vm == null) { - throw new CloudRuntimeException(String.format("Could not find any VM associated with the Backup [uuid: %s, externalId: %s].", backup.getUuid(), backup.getExternalId())); + throw new CloudRuntimeException(String.format("Could not find any VM associated with the Backup [uuid: %s, name: %s, externalId: %s].", backup.getUuid(), backup.getName(), backup.getExternalId())); } if (!forced) { logger.debug(String.format("Veeam backup provider does not have a safe way to remove a single restore point, which results in all backup chain being removed. 
" @@ -315,8 +315,8 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, } Metric metric = backendMetrics.get(vm.getUuid()); - logger.debug(String.format("Metrics for VM [uuid: %s, name: %s] is [backup size: %s, data size: %s].", vm.getUuid(), - vm.getInstanceName(), metric.getBackupSize(), metric.getDataSize())); + logger.debug("Metrics for VM [{}] is [backup size: {}, data size: {}].", vm, + metric.getBackupSize(), metric.getDataSize()); metrics.put(vm, metric); } return metrics; @@ -331,8 +331,8 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, for (final Backup backup : backupsInDb) { if (restorePoint.getId().equals(backup.getExternalId())) { if (metric != null) { - logger.debug(String.format("Update backup with [uuid: %s, external id: %s] from [size: %s, protected size: %s] to [size: %s, protected size: %s].", - backup.getUuid(), backup.getExternalId(), backup.getSize(), backup.getProtectedSize(), metric.getBackupSize(), metric.getDataSize())); + logger.debug("Update backup with [id: {}, uuid: {}, name: {}, external id: {}] from [size: {}, protected size: {}] to [size: {}, protected size: {}].", + backup.getId(), backup.getUuid(), backup.getName(), backup.getExternalId(), backup.getSize(), backup.getProtectedSize(), metric.getBackupSize(), metric.getDataSize()); ((BackupVO) backup).setSize(metric.getBackupSize()); ((BackupVO) backup).setProtectedSize(metric.getDataSize()); @@ -348,7 +348,7 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, public void syncBackups(VirtualMachine vm, Backup.Metric metric) { List restorePoints = listRestorePoints(vm); if (CollectionUtils.isEmpty(restorePoints)) { - logger.debug(String.format("Can't find any restore point to VM: [uuid: %s, name: %s].", vm.getUuid(), vm.getInstanceName())); + logger.debug("Can't find any restore point to VM: {}", vm); return; } Transaction.execute(new TransactionCallbackNoReturn() { @@ -379,9 +379,8 @@ 
public class VeeamBackupProvider extends AdapterBase implements BackupProvider, backup.setDomainId(vm.getDomainId()); backup.setZoneId(vm.getDataCenterId()); - logger.debug(String.format("Creating a new entry in backups: [uuid: %s, vm_id: %s, external_id: %s, type: %s, date: %s, backup_offering_id: %s, account_id: %s, " - + "domain_id: %s, zone_id: %s].", backup.getUuid(), backup.getVmId(), backup.getExternalId(), backup.getType(), backup.getDate(), - backup.getBackupOfferingId(), backup.getAccountId(), backup.getDomainId(), backup.getZoneId())); + logger.debug("Creating a new entry in backups: [id: {}, uuid: {}, name: {}, vm_id: {}, external_id: {}, type: {}, date: {}, backup_offering_id: {}, account_id: {}, " + + "domain_id: {}, zone_id: {}].", backup.getId(), backup.getUuid(), backup.getName(), backup.getVmId(), backup.getExternalId(), backup.getType(), backup.getDate(), backup.getBackupOfferingId(), backup.getAccountId(), backup.getDomainId(), backup.getZoneId()); backupDao.persist(backup); ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, vm.getAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_VM_BACKUP_CREATE, diff --git a/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/VeeamBackupProviderTest.java b/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/VeeamBackupProviderTest.java index 1f2de8f3196..cbfe2fda592 100644 --- a/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/VeeamBackupProviderTest.java +++ b/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/VeeamBackupProviderTest.java @@ -61,7 +61,7 @@ public class VeeamBackupProviderTest { backupProvider.deleteBackup(backup, false); } catch (Exception e) { assertEquals(CloudRuntimeException.class, e.getClass()); - String expected = String.format("Could not find any VM associated with the Backup [uuid: %s, externalId: %s].", backup.getUuid(), "abc"); + String expected = String.format("Could not find any VM associated with the Backup [uuid: %s, 
name: null, externalId: %s].", backup.getUuid(), "abc"); assertEquals(expected , e.getMessage()); } } diff --git a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java index 4f1db396b7c..cd7dc2bbbad 100644 --- a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java +++ b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java @@ -142,7 +142,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { DedicatedResourceVO dedicatedZone = _dedicatedDao.findByZoneId(zoneId); //check if zone is dedicated if (dedicatedZone != null) { - logger.error("Zone " + dc.getName() + " is already dedicated"); + logger.error(String.format("Zone %s is already dedicated", dc)); throw new CloudRuntimeException("Zone " + dc.getName() + " is already dedicated"); } @@ -161,7 +161,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { if (dPod.getAccountId().equals(accountId)) { podsToRelease.add(dPod); } else { - logger.error("Pod " + pod.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); + logger.error(String.format("Pod %s under this Zone %s is dedicated to different account/domain", pod, dc)); throw new CloudRuntimeException("Pod " + pod.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); } } else { @@ -187,7 +187,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { if (dCluster.getAccountId().equals(accountId)) { clustersToRelease.add(dCluster); } else { - logger.error("Cluster " + cluster.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); + logger.error(String.format("Cluster %s under this Zone %s is dedicated to different 
account/domain", cluster, dc)); throw new CloudRuntimeException("Cluster " + cluster.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); } @@ -214,7 +214,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { if (dHost.getAccountId().equals(accountId)) { hostsToRelease.add(dHost); } else { - logger.error("Host " + host.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); + logger.error(String.format("Host %s under this Zone %s is dedicated to different account/domain", host, dc)); throw new CloudRuntimeException("Host " + host.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); } } else { @@ -292,7 +292,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { DedicatedResourceVO dedicatedZoneOfPod = _dedicatedDao.findByZoneId(pod.getDataCenterId()); //check if pod is dedicated if (dedicatedPod != null) { - logger.error("Pod " + pod.getName() + " is already dedicated"); + logger.error(String.format("Pod %s is already dedicated", pod)); throw new CloudRuntimeException("Pod " + pod.getName() + " is already dedicated"); } @@ -302,7 +302,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { if (dedicatedZoneOfPod.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedZoneOfPod.getDomainId().equals(domainId) || domainIdInChildreanList))) { DataCenterVO zone = _zoneDao.findById(pod.getDataCenterId()); - logger.error("Cannot dedicate Pod. Its zone is already dedicated"); + logger.error(String.format("Cannot dedicate Pod. 
Its zone %s is already dedicated", zone)); throw new CloudRuntimeException("Pod's Zone " + zone.getName() + " is already dedicated"); } } @@ -323,7 +323,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { if (dCluster.getAccountId().equals(accountId)) { clustersToRelease.add(dCluster); } else { - logger.error("Cluster " + cluster.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); + logger.error(String.format("Cluster %s under this Pod %s is dedicated to different account/domain", cluster, pod)); throw new CloudRuntimeException("Cluster " + cluster.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); } @@ -350,7 +350,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { if (dHost.getAccountId().equals(accountId)) { hostsToRelease.add(dHost); } else { - logger.error("Host " + host.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); + logger.error(String.format("Host %s under this Pod %s is dedicated to different account/domain", host, pod)); throw new CloudRuntimeException("Host " + host.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); } } else { @@ -421,7 +421,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { //check if cluster is dedicated if (dedicatedCluster != null) { - logger.error("Cluster " + cluster.getName() + " is already dedicated"); + logger.error(String.format("Cluster %s is already dedicated", cluster)); throw new CloudRuntimeException("Cluster " + cluster.getName() + " is already dedicated"); } @@ -430,8 +430,8 @@ public class DedicatedResourceManagerImpl implements DedicatedService { //can dedicate a cluster to an account/domain if pod is dedicated to parent-domain if (dedicatedPodOfCluster.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && 
!(dedicatedPodOfCluster.getDomainId().equals(domainId) || domainIdInChildreanList))) { - logger.error("Cannot dedicate Cluster. Its Pod is already dedicated"); HostPodVO pod = _podDao.findById(cluster.getPodId()); + logger.error(String.format("Cannot dedicate Cluster %s. Its Pod %s is already dedicated", cluster, pod)); throw new CloudRuntimeException("Cluster's Pod " + pod.getName() + " is already dedicated"); } } @@ -441,8 +441,8 @@ public class DedicatedResourceManagerImpl implements DedicatedService { //can dedicate a cluster to an account/domain if zone is dedicated to parent-domain if (dedicatedZoneOfCluster.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedZoneOfCluster.getDomainId().equals(domainId) || domainIdInChildreanList))) { - logger.error("Cannot dedicate Cluster. Its zone is already dedicated"); DataCenterVO zone = _zoneDao.findById(cluster.getDataCenterId()); + logger.error(String.format("Cannot dedicate Cluster %s. 
Its zone %s is already dedicated", cluster, zone)); throw new CloudRuntimeException("Cluster's Zone " + zone.getName() + " is already dedicated"); } } @@ -463,7 +463,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { if (dHost.getAccountId().equals(accountId)) { hostsToRelease.add(dHost); } else { - logger.error("Cannot dedicate Cluster " + cluster.getName() + " to account" + accountName); + logger.error(String.format("Cannot dedicate Cluster %s to account %s", cluster, accountName)); throw new CloudRuntimeException("Cannot dedicate Cluster " + cluster.getName() + " to account" + accountName); } } else { @@ -536,7 +536,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { DedicatedResourceVO dedicatedZoneOfHost = _dedicatedDao.findByZoneId(host.getDataCenterId()); if (dedicatedHost != null) { - logger.error("Host " + host.getName() + " is already dedicated"); + logger.error(String.format("Host %s is already dedicated", host)); throw new CloudRuntimeException("Host " + host.getName() + " is already dedicated"); } @@ -546,7 +546,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { if (dedicatedClusterOfHost.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedClusterOfHost.getDomainId().equals(domainId) || domainIdInChildreanList))) { ClusterVO cluster = _clusterDao.findById(host.getClusterId()); - logger.error("Host's Cluster " + cluster.getName() + " is already dedicated"); + logger.error(String.format("Host's Cluster %s is already dedicated", cluster)); throw new CloudRuntimeException("Host's Cluster " + cluster.getName() + " is already dedicated"); } } @@ -557,7 +557,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { if (dedicatedPodOfHost.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedPodOfHost.getDomainId().equals(domainId) || 
domainIdInChildreanList))) { HostPodVO pod = _podDao.findById(host.getPodId()); - logger.error("Host's Pod " + pod.getName() + " is already dedicated"); + logger.error(String.format("Host's Pod %s is already dedicated", pod)); throw new CloudRuntimeException("Host's Pod " + pod.getName() + " is already dedicated"); } } @@ -568,7 +568,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { if (dedicatedZoneOfHost.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedZoneOfHost.getDomainId().equals(domainId) || domainIdInChildreanList))) { DataCenterVO zone = _zoneDao.findById(host.getDataCenterId()); - logger.error("Host's Data Center " + zone.getName() + " is already dedicated"); + logger.error(String.format("Host's Data Center %s is already dedicated", zone)); throw new CloudRuntimeException("Host's Data Center " + zone.getName() + " is already dedicated"); } } @@ -576,7 +576,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { List childDomainIds = getDomainChildIds(domainId); childDomainIds.add(domainId); - checkHostSuitabilityForExplicitDedication(accountId, childDomainIds, hostId); + checkHostSuitabilityForExplicitDedication(accountId, childDomainIds, host); final Long accountIdFinal = accountId; return Transaction.execute(new TransactionCallback>() { @@ -662,22 +662,22 @@ public class DedicatedResourceManagerImpl implements DedicatedService { return vms; } - private boolean checkHostSuitabilityForExplicitDedication(Long accountId, List domainIds, long hostId) { + private boolean checkHostSuitabilityForExplicitDedication(Long accountId, List domainIds, Host host) { boolean suitable = true; - List allVmsOnHost = getVmsOnHost(hostId); + List allVmsOnHost = getVmsOnHost(host.getId()); if (accountId != null) { for (UserVmVO vm : allVmsOnHost) { if (vm.getAccountId() != accountId) { - logger.info("Host " + vm.getHostId() + " found to be unsuitable for explicit 
dedication as it is " + "running instances of another account"); - throw new CloudRuntimeException("Host " + hostId + " found to be unsuitable for explicit dedication as it is " + + logger.info(String.format("Host %s found to be unsuitable for explicit dedication as it is running instances of another account", host)); + throw new CloudRuntimeException("Host " + host.getUuid() + " found to be unsuitable for explicit dedication as it is " + "running instances of another account"); } } } else { for (UserVmVO vm : allVmsOnHost) { if (!domainIds.contains(vm.getDomainId())) { - logger.info("Host " + vm.getHostId() + " found to be unsuitable for explicit dedication as it is " + "running instances of another domain"); - throw new CloudRuntimeException("Host " + hostId + " found to be unsuitable for explicit dedication as it is " + + logger.info(String.format("Host %s found to be unsuitable for explicit dedication as it is running instances of another domain", host)); + throw new CloudRuntimeException("Host " + host.getUuid() + " found to be unsuitable for explicit dedication as it is " + "running instances of another domain"); } } @@ -688,7 +688,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { private boolean checkHostsSuitabilityForExplicitDedication(Long accountId, List domainIds, List hosts) { boolean suitable = true; for (HostVO host : hosts) { - checkHostSuitabilityForExplicitDedication(accountId, domainIds, host.getId()); + checkHostSuitabilityForExplicitDedication(accountId, domainIds, host); } return suitable; } @@ -939,7 +939,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { public void doInTransactionWithoutResult(TransactionStatus status) { Long resourceId = resourceFinal.getId(); if (!_dedicatedDao.remove(resourceId)) { - throw new CloudRuntimeException("Failed to delete Resource " + resourceId); + throw new CloudRuntimeException(String.format("Failed to delete Resource %s", resourceFinal)); } if (zoneId != 
null) { // remove the domainId set in zone diff --git a/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java b/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java index bd1bcf06101..b971b3b8596 100644 --- a/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java +++ b/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java @@ -156,13 +156,16 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements Deploy for (VMInstanceVO vm : allVmsOnHost) { if (vm.getAccountId() != accountId) { - logger.info("Host " + vm.getHostId() + " found to be unsuitable for implicit dedication as it is " + "running instances of another account"); + logger.info(String.format("Host %d for vm %s found to be unsuitable for " + + "implicit dedication as it is running instances of another account", + vm.getHostId(), vm)); suitable = false; break; } else { if (!isImplicitPlannerUsedByOffering(vm.getServiceOfferingId())) { - logger.info("Host " + vm.getHostId() + " found to be unsuitable for implicit dedication as it " + - "is running instances of this account which haven't been created using implicit dedication."); + logger.info(String.format("Host %d for vm %s found to be unsuitable for " + + "implicit dedication as it is running instances of this account which" + + " haven't been created using implicit dedication.", vm.getHostId(), vm)); suitable = false; break; } @@ -177,11 +180,11 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements Deploy return false; for (VMInstanceVO vm : allVmsOnHost) { if (!isImplicitPlannerUsedByOffering(vm.getServiceOfferingId())) { - logger.info("Host " + vm.getHostId() + " found to be running a vm created by a planner other" + " than implicit."); + logger.info(String.format("Host %d found to be running a 
vm %s created by a planner other than implicit.", vm.getHostId(), vm)); createdByImplicitStrict = false; break; } else if (isServiceOfferingUsingPlannerInPreferredMode(vm.getServiceOfferingId())) { - logger.info("Host " + vm.getHostId() + " found to be running a vm created by an implicit planner" + " in preferred mode."); + logger.info(String.format("Host %d found to be running a vm %s created by an implicit planner in preferred mode.", vm.getHostId(), vm)); createdByImplicitStrict = false; break; } diff --git a/plugins/drs/cluster/balanced/src/main/java/org/apache/cloudstack/cluster/Balanced.java b/plugins/drs/cluster/balanced/src/main/java/org/apache/cloudstack/cluster/Balanced.java index c799ac872c0..b6a5ed1aac1 100644 --- a/plugins/drs/cluster/balanced/src/main/java/org/apache/cloudstack/cluster/Balanced.java +++ b/plugins/drs/cluster/balanced/src/main/java/org/apache/cloudstack/cluster/Balanced.java @@ -21,6 +21,7 @@ package org.apache.cloudstack.cluster; import com.cloud.host.Host; import com.cloud.offering.ServiceOffering; +import com.cloud.org.Cluster; import com.cloud.utils.Ternary; import com.cloud.utils.component.AdapterBase; import com.cloud.vm.VirtualMachine; @@ -39,20 +40,21 @@ public class Balanced extends AdapterBase implements ClusterDrsAlgorithm { private static final Logger logger = LogManager.getLogger(Balanced.class); @Override - public boolean needsDrs(long clusterId, List> cpuList, - List> memoryList) throws ConfigurationException { + public boolean needsDrs(Cluster cluster, List> cpuList, + List> memoryList) throws ConfigurationException { + long clusterId = cluster.getId(); double threshold = getThreshold(clusterId); Double imbalance = ClusterDrsAlgorithm.getClusterImbalance(clusterId, cpuList, memoryList, null); String drsMetric = ClusterDrsAlgorithm.getClusterDrsMetric(clusterId); String metricType = ClusterDrsAlgorithm.getDrsMetricType(clusterId); Boolean useRatio = ClusterDrsAlgorithm.getDrsMetricUseRatio(clusterId); if (imbalance > 
threshold) { - logger.debug(String.format("Cluster %d needs DRS. Imbalance: %s Threshold: %s Algorithm: %s DRS metric: %s Metric Type: %s Use ratio: %s", - clusterId, imbalance, threshold, getName(), drsMetric, metricType, useRatio)); + logger.debug("Cluster {} needs DRS. Imbalance: {} Threshold: {} Algorithm: {} DRS metric: {} Metric Type: {} Use ratio: {}", + cluster, imbalance, threshold, getName(), drsMetric, metricType, useRatio); return true; } else { - logger.debug(String.format("Cluster %d does not need DRS. Imbalance: %s Threshold: %s Algorithm: %s DRS metric: %s Metric Type: %s Use ratio: %s", - clusterId, imbalance, threshold, getName(), drsMetric, metricType, useRatio)); + logger.debug("Cluster {} does not need DRS. Imbalance: {} Threshold: {} Algorithm: {} DRS metric: {} Metric Type: {} Use ratio: {}", + cluster, imbalance, threshold, getName(), drsMetric, metricType, useRatio); return false; } } @@ -67,15 +69,15 @@ public class Balanced extends AdapterBase implements ClusterDrsAlgorithm { } @Override - public Ternary getMetrics(long clusterId, VirtualMachine vm, + public Ternary getMetrics(Cluster cluster, VirtualMachine vm, ServiceOffering serviceOffering, Host destHost, Map> hostCpuMap, Map> hostMemoryMap, Boolean requiresStorageMotion) throws ConfigurationException { - Double preImbalance = ClusterDrsAlgorithm.getClusterImbalance(clusterId, new ArrayList<>(hostCpuMap.values()), new ArrayList<>(hostMemoryMap.values()), null); + Double preImbalance = ClusterDrsAlgorithm.getClusterImbalance(cluster.getId(), new ArrayList<>(hostCpuMap.values()), new ArrayList<>(hostMemoryMap.values()), null); Double postImbalance = getImbalancePostMigration(serviceOffering, vm, destHost, hostCpuMap, hostMemoryMap); - logger.debug(String.format("Cluster %d pre-imbalance: %s post-imbalance: %s Algorithm: %s VM: %s srcHost: %d destHost: %s", - clusterId, preImbalance, postImbalance, getName(), vm.getUuid(), vm.getHostId(), destHost.getUuid())); + logger.debug("Cluster {} 
pre-imbalance: {} post-imbalance: {} Algorithm: {} VM: {} srcHost: {} destHost: {}", + cluster, preImbalance, postImbalance, getName(), vm, vm.getHostId(), destHost); // This needs more research to determine the cost and benefit of a migration // TODO: Cost should be a factor of the VM size and the host capacity diff --git a/plugins/drs/cluster/balanced/src/test/java/org/apache/cloudstack/cluster/BalancedTest.java b/plugins/drs/cluster/balanced/src/test/java/org/apache/cloudstack/cluster/BalancedTest.java index a1562b52e38..d5160671958 100644 --- a/plugins/drs/cluster/balanced/src/test/java/org/apache/cloudstack/cluster/BalancedTest.java +++ b/plugins/drs/cluster/balanced/src/test/java/org/apache/cloudstack/cluster/BalancedTest.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.cluster; +import com.cloud.dc.ClusterVO; import com.cloud.host.Host; import com.cloud.service.ServiceOfferingVO; import com.cloud.utils.Ternary; @@ -61,6 +62,8 @@ public class BalancedTest { ServiceOfferingVO serviceOffering; + ClusterVO cluster; + long clusterId = 1L; Map> hostVmMap; @@ -73,6 +76,7 @@ public class BalancedTest { public void setUp() throws NoSuchFieldException, IllegalAccessException { closeable = MockitoAnnotations.openMocks(this); + cluster = Mockito.mock(ClusterVO.class); vm1 = Mockito.mock(VirtualMachine.class); vm2 = Mockito.mock(VirtualMachine.class); @@ -84,10 +88,10 @@ public class BalancedTest { hostVmMap.put(2L, Arrays.asList(vm2, vm3)); serviceOffering = Mockito.mock(ServiceOfferingVO.class); + + Mockito.when(cluster.getId()).thenReturn(clusterId); Mockito.when(vm3.getHostId()).thenReturn(2L); - Mockito.when(destHost.getId()).thenReturn(1L); - Mockito.when(serviceOffering.getCpu()).thenReturn(1); Mockito.when(serviceOffering.getSpeed()).thenReturn(1000); Mockito.when(serviceOffering.getRamSize()).thenReturn(1024); @@ -133,7 +137,7 @@ public class BalancedTest { @Test public void needsDrsWithCpu() throws ConfigurationException, NoSuchFieldException, 
IllegalAccessException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "cpu"); - assertFalse(balanced.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); + assertFalse(balanced.needsDrs(cluster, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); } /* @@ -143,14 +147,14 @@ public class BalancedTest { @Test public void needsDrsWithMemory() throws ConfigurationException, NoSuchFieldException, IllegalAccessException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "memory"); - assertTrue(balanced.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); + assertTrue(balanced.needsDrs(cluster, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); } /* 3. cluster with "unknown" metric */ @Test public void needsDrsWithUnknown() throws NoSuchFieldException, IllegalAccessException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "unknown"); - assertThrows(ConfigurationException.class, () -> balanced.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); + assertThrows(ConfigurationException.class, () -> balanced.needsDrs(cluster, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); } /** @@ -179,7 +183,7 @@ public class BalancedTest { @Test public void getMetricsWithCpu() throws NoSuchFieldException, IllegalAccessException, ConfigurationException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "cpu"); - Ternary result = balanced.getMetrics(clusterId, vm3, serviceOffering, destHost, + Ternary result = balanced.getMetrics(cluster, vm3, serviceOffering, destHost, hostCpuFreeMap, hostMemoryFreeMap, false); assertEquals(0.0, result.first(), 0.01); assertEquals(0.0, result.second(), 0.0); @@ -193,7 +197,7 @@ public class BalancedTest { @Test public void 
getMetricsWithMemory() throws NoSuchFieldException, IllegalAccessException, ConfigurationException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "memory"); - Ternary result = balanced.getMetrics(clusterId, vm3, serviceOffering, destHost, + Ternary result = balanced.getMetrics(cluster, vm3, serviceOffering, destHost, hostCpuFreeMap, hostMemoryFreeMap, false); assertEquals(0.4, result.first(), 0.01); assertEquals(0, result.second(), 0.0); diff --git a/plugins/drs/cluster/condensed/src/main/java/org/apache/cloudstack/cluster/Condensed.java b/plugins/drs/cluster/condensed/src/main/java/org/apache/cloudstack/cluster/Condensed.java index 3a8befa628b..70c5acd951f 100644 --- a/plugins/drs/cluster/condensed/src/main/java/org/apache/cloudstack/cluster/Condensed.java +++ b/plugins/drs/cluster/condensed/src/main/java/org/apache/cloudstack/cluster/Condensed.java @@ -21,6 +21,7 @@ package org.apache.cloudstack.cluster; import com.cloud.host.Host; import com.cloud.offering.ServiceOffering; +import com.cloud.org.Cluster; import com.cloud.utils.Ternary; import com.cloud.utils.component.AdapterBase; import com.cloud.vm.VirtualMachine; @@ -40,8 +41,9 @@ public class Condensed extends AdapterBase implements ClusterDrsAlgorithm { private static final Logger logger = LogManager.getLogger(Condensed.class); @Override - public boolean needsDrs(long clusterId, List> cpuList, - List> memoryList) throws ConfigurationException { + public boolean needsDrs(Cluster cluster, List> cpuList, + List> memoryList) throws ConfigurationException { + long clusterId = cluster.getId(); double threshold = getThreshold(clusterId); Float skipThreshold = ClusterDrsImbalanceSkipThreshold.valueIn(clusterId); Double imbalance = ClusterDrsAlgorithm.getClusterImbalance(clusterId, cpuList, memoryList, skipThreshold); @@ -50,12 +52,12 @@ public class Condensed extends AdapterBase implements ClusterDrsAlgorithm { Boolean useRatio = ClusterDrsAlgorithm.getDrsMetricUseRatio(clusterId); if (imbalance < 
threshold) { - logger.debug(String.format("Cluster %d needs DRS. Imbalance: %s Threshold: %s Algorithm: %s DRS metric: %s Metric Type: %s Use ratio: %s SkipThreshold: %s", - clusterId, imbalance, threshold, getName(), drsMetric, metricType, useRatio, skipThreshold)); + logger.debug("Cluster {} needs DRS. Imbalance: {} Threshold: {} Algorithm: {} DRS metric: {} Metric Type: {} Use ratio: {} SkipThreshold: {}", + cluster, imbalance, threshold, getName(), drsMetric, metricType, useRatio, skipThreshold); return true; } else { - logger.debug(String.format("Cluster %d does not need DRS. Imbalance: %s Threshold: %s Algorithm: %s DRS metric: %s Metric Type: %s Use ratio: %s SkipThreshold: %s", - clusterId, imbalance, threshold, getName(), drsMetric, metricType, useRatio, skipThreshold)); + logger.debug("Cluster {} does not need DRS. Imbalance: {} Threshold: {} Algorithm: {} DRS metric: {} Metric Type: {} Use ratio: {} SkipThreshold: {}", + cluster, imbalance, threshold, getName(), drsMetric, metricType, useRatio, skipThreshold); return false; } } @@ -70,16 +72,16 @@ public class Condensed extends AdapterBase implements ClusterDrsAlgorithm { } @Override - public Ternary getMetrics(long clusterId, VirtualMachine vm, + public Ternary getMetrics(Cluster cluster, VirtualMachine vm, ServiceOffering serviceOffering, Host destHost, Map> hostCpuMap, Map> hostMemoryMap, Boolean requiresStorageMotion) throws ConfigurationException { - Double preImbalance = ClusterDrsAlgorithm.getClusterImbalance(clusterId, new ArrayList<>(hostCpuMap.values()), + Double preImbalance = ClusterDrsAlgorithm.getClusterImbalance(cluster.getId(), new ArrayList<>(hostCpuMap.values()), new ArrayList<>(hostMemoryMap.values()), null); Double postImbalance = getImbalancePostMigration(serviceOffering, vm, destHost, hostCpuMap, hostMemoryMap); - logger.debug(String.format("Cluster %d pre-imbalance: %s post-imbalance: %s Algorithm: %s VM: %s srcHost: %d destHost: %s", - clusterId, preImbalance, postImbalance, 
getName(), vm.getUuid(), vm.getHostId(), destHost.getUuid())); + logger.debug("Cluster {} pre-imbalance: {} post-imbalance: {} Algorithm: {} VM: {} srcHost: {} destHost: {}", + cluster, preImbalance, postImbalance, getName(), vm, vm.getHostId(), destHost); // This needs more research to determine the cost and benefit of a migration // TODO: Cost should be a factor of the VM size and the host capacity diff --git a/plugins/drs/cluster/condensed/src/test/java/org/apache/cloudstack/cluster/CondensedTest.java b/plugins/drs/cluster/condensed/src/test/java/org/apache/cloudstack/cluster/CondensedTest.java index d5072774534..3d3896704da 100644 --- a/plugins/drs/cluster/condensed/src/test/java/org/apache/cloudstack/cluster/CondensedTest.java +++ b/plugins/drs/cluster/condensed/src/test/java/org/apache/cloudstack/cluster/CondensedTest.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.cluster; +import com.cloud.dc.ClusterVO; import com.cloud.host.Host; import com.cloud.service.ServiceOfferingVO; import com.cloud.utils.Ternary; @@ -61,6 +62,8 @@ public class CondensedTest { ServiceOfferingVO serviceOffering; + ClusterVO cluster; + long clusterId = 1L; Map> hostVmMap; @@ -74,6 +77,8 @@ public class CondensedTest { public void setUp() throws NoSuchFieldException, IllegalAccessException { closeable = MockitoAnnotations.openMocks(this); + cluster = Mockito.mock(ClusterVO.class); + vm1 = Mockito.mock(VirtualMachine.class); vm2 = Mockito.mock(VirtualMachine.class); vm3 = Mockito.mock(VirtualMachine.class); // vm to migrate @@ -84,10 +89,10 @@ public class CondensedTest { hostVmMap.put(2L, Arrays.asList(vm2, vm3)); serviceOffering = Mockito.mock(ServiceOfferingVO.class); + + Mockito.when(cluster.getId()).thenReturn(clusterId); Mockito.when(vm3.getHostId()).thenReturn(2L); - Mockito.when(destHost.getId()).thenReturn(1L); - Mockito.when(serviceOffering.getCpu()).thenReturn(1); Mockito.when(serviceOffering.getSpeed()).thenReturn(1000); 
Mockito.when(serviceOffering.getRamSize()).thenReturn(512); @@ -134,7 +139,7 @@ public class CondensedTest { @Test public void needsDrsWithCpu() throws ConfigurationException, NoSuchFieldException, IllegalAccessException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "cpu"); - assertTrue(condensed.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); + assertTrue(condensed.needsDrs(cluster, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); } /* @@ -144,14 +149,14 @@ public class CondensedTest { @Test public void needsDrsWithMemory() throws ConfigurationException, NoSuchFieldException, IllegalAccessException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "memory"); - assertFalse(condensed.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); + assertFalse(condensed.needsDrs(cluster, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); } /* 3. 
cluster with "unknown" metric */ @Test public void needsDrsWithUnknown() throws NoSuchFieldException, IllegalAccessException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "unknown"); - assertThrows(ConfigurationException.class, () -> condensed.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); + assertThrows(ConfigurationException.class, () -> condensed.needsDrs(cluster, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); } /** @@ -180,7 +185,7 @@ public class CondensedTest { @Test public void getMetricsWithCpu() throws NoSuchFieldException, IllegalAccessException, ConfigurationException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "cpu"); - Ternary result = condensed.getMetrics(clusterId, vm3, serviceOffering, destHost, + Ternary result = condensed.getMetrics(cluster, vm3, serviceOffering, destHost, hostCpuFreeMap, hostMemoryFreeMap, false); assertEquals(0.0, result.first(), 0.0); assertEquals(0, result.second(), 0.0); @@ -194,7 +199,7 @@ public class CondensedTest { @Test public void getMetricsWithMemory() throws NoSuchFieldException, IllegalAccessException, ConfigurationException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "memory"); - Ternary result = condensed.getMetrics(clusterId, vm3, serviceOffering, destHost, + Ternary result = condensed.getMetrics(cluster, vm3, serviceOffering, destHost, hostCpuFreeMap, hostMemoryFreeMap, false); assertEquals(-0.4, result.first(), 0.01); assertEquals(0, result.second(), 0.0); diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookServiceImpl.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookServiceImpl.java index 58b265a99c0..97d00c45e4d 100644 --- a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookServiceImpl.java +++ 
b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookServiceImpl.java @@ -103,8 +103,7 @@ public class WebhookServiceImpl extends ManagerBase implements WebhookService, W return jobs; } if (event.getResourceAccountId() == null) { - logger.warn("Skipping delivering event [ID: {}, description: {}] to any webhook as account ID is missing", - event.getEventId(), event.getDescription()); + logger.warn("Skipping delivering event {} to any webhook as account ID is missing", event); throw new EventBusException(String.format("Account missing for the event ID: %s", event.getEventUuid())); } List domainIds = new ArrayList<>(); @@ -327,7 +326,7 @@ public class WebhookServiceImpl extends ManagerBase implements WebhookService, W } long deliveriesLimit = WebhookDeliveriesLimit.value(); logger.debug("Clearing old deliveries for webhooks with limit={} using management server {}", - deliveriesLimit, msHost.getMsid()); + deliveriesLimit, msHost); long processed = cleanupOldWebhookDeliveries(deliveriesLimit); logger.debug("Cleared old deliveries with limit={} for {} webhooks", deliveriesLimit, processed); } catch (Exception e) { diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryJoinVO.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryJoinVO.java index e36f870c8d9..f0fb3e1cc9b 100644 --- a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryJoinVO.java +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryJoinVO.java @@ -173,8 +173,8 @@ public class WebhookDeliveryJoinVO extends BaseViewVO implements InternalIdentit @Override public String toString() { - return String.format("WebhookDelivery [%s]", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "uuid", "webhookId", "startTime", "success")); + return String.format("WebhookDelivery %s", 
ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "webhookId", "webhookName", "startTime", "success")); } public WebhookDeliveryJoinVO() { diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryVO.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryVO.java index e39f57a2663..e266ea5d7c4 100644 --- a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryVO.java +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryVO.java @@ -130,7 +130,7 @@ public class WebhookDeliveryVO implements WebhookDelivery { @Override public String toString() { - return String.format("WebhookDelivery [%s]", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + return String.format("WebhookDelivery %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( this, "id", "uuid", "webhookId", "startTime", "success")); } diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookJoinVO.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookJoinVO.java index f1708609587..9ff15d34a9c 100644 --- a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookJoinVO.java +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookJoinVO.java @@ -225,7 +225,7 @@ public class WebhookJoinVO implements ControlledViewEntity { @Override public String toString() { - return String.format("Webhook [%s]", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + return String.format("Webhook %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( this, "id", "uuid", "name")); } diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookVO.java 
b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookVO.java index 93e3e801423..852cdf740d1 100644 --- a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookVO.java +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookVO.java @@ -191,7 +191,7 @@ public class WebhookVO implements Webhook { @Override public String toString() { - return String.format("Webhook [%s]",ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + return String.format("Webhook %s",ReflectionToStringBuilderUtils.reflectOnlySelectedFields( this, "id", "uuid", "name", "payloadUrl")); } diff --git a/plugins/host-allocators/random/src/main/java/com/cloud/agent/manager/allocator/impl/RandomAllocator.java b/plugins/host-allocators/random/src/main/java/com/cloud/agent/manager/allocator/impl/RandomAllocator.java index a71ae26e670..53e44ab5aab 100644 --- a/plugins/host-allocators/random/src/main/java/com/cloud/agent/manager/allocator/impl/RandomAllocator.java +++ b/plugins/host-allocators/random/src/main/java/com/cloud/agent/manager/allocator/impl/RandomAllocator.java @@ -75,7 +75,7 @@ public class RandomAllocator extends AdapterBase implements HostAllocator { if (logger.isDebugEnabled()) { logger.debug(String.format("Found %d hosts %s with type: %s, zone ID: %d, pod ID: %d, cluster ID: %s, offering host tag(s): %s, template tag: %s", taggedHosts.size(), - (taggedHosts.isEmpty() ? "" : String.format("(%s)", StringUtils.join(taggedHosts.stream().map(HostVO::getId).toArray(), ","))), + (taggedHosts.isEmpty() ? 
"" : String.format("(%s)", StringUtils.join(taggedHosts.stream().map(HostVO::toString).toArray(), ","))), type.name(), dcId, podId, clusterId, offeringHostTag, templateTag)); } return taggedHosts; @@ -139,19 +139,19 @@ public class RandomAllocator extends AdapterBase implements HostAllocator { } if (avoid.shouldAvoid(host)) { if (logger.isDebugEnabled()) { - logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + " is in avoid set, skipping this and trying other available hosts"); + logger.debug(String.format("Host %s is in avoid set, skipping this and trying other available hosts", host)); } continue; } Pair cpuCapabilityAndCapacity = capacityManager.checkIfHostHasCpuCapabilityAndCapacity(host, offering, considerReservedCapacity); if (!cpuCapabilityAndCapacity.first() || !cpuCapabilityAndCapacity.second()) { if (logger.isDebugEnabled()) { - logger.debug("Not using host " + host.getId() + "; host has cpu capability? " + cpuCapabilityAndCapacity.first() + ", host has capacity?" + cpuCapabilityAndCapacity.second()); + logger.debug(String.format("Not using host %s; host has cpu capability? %s, host has capacity? 
%s", host, cpuCapabilityAndCapacity.first(), cpuCapabilityAndCapacity.second())); } continue; } if (logger.isDebugEnabled()) { - logger.debug("Found a suitable host, adding to list: " + host.getId()); + logger.debug(String.format("Found a suitable host, adding to list: %s", host)); } suitableHosts.add(host); } diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java index 321369b24b9..f918f66941e 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java @@ -258,7 +258,7 @@ public class BareMetalDiscoverer extends DiscovererBase implements Discoverer, R List deadVms = _vmDao.listByLastHostId(host.getId()); for (VMInstanceVO vm : deadVms) { if (vm.getState() == State.Running || vm.getHostId() != null) { - throw new CloudRuntimeException("VM " + vm.getId() + "is still running on host " + host.getId()); + throw new CloudRuntimeException(String.format("VM %s is still running on host %s", vm, host)); } _vmDao.remove(vm.getId()); } diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java index 318ac225c8c..83199b5f51c 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java @@ -76,12 +76,12 @@ public class BareMetalPlanner extends AdapterBase implements DeploymentPlanner { String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag); if (vm.getLastHostId() != null && haVmTag == null) { - HostVO h = _hostDao.findById(vm.getLastHostId()); - DataCenter dc = 
_dcDao.findById(h.getDataCenterId()); - Pod pod = _podDao.findById(h.getPodId()); - Cluster c = _clusterDao.findById(h.getClusterId()); - logger.debug("Start baremetal vm " + vm.getId() + " on last stayed host " + h.getId()); - return new DeployDestination(dc, pod, c, h); + HostVO host = _hostDao.findById(vm.getLastHostId()); + DataCenter dc = _dcDao.findById(host.getDataCenterId()); + Pod pod = _podDao.findById(host.getPodId()); + Cluster cluster = _clusterDao.findById(host.getClusterId()); + logger.debug("Start baremetal vm {} on last stayed host {}", vm, host); + return new DeployDestination(dc, pod, cluster, host); } if (haVmTag != null) { @@ -124,22 +124,22 @@ public class BareMetalPlanner extends AdapterBase implements DeploymentPlanner { if (haVmTag == null) { hosts = _resourceMgr.listAllUpAndEnabledNonHAHosts(Host.Type.Routing, cluster.getId(), cluster.getPodId(), cluster.getDataCenterId()); } else { - logger.warn("Cannot find HA host with tag " + haVmTag + " in cluster id=" + cluster.getId() + ", pod id=" + cluster.getPodId() + ", data center id=" + + logger.warn("Cannot find HA host with tag " + haVmTag + " in cluster " + cluster + ", pod id=" + cluster.getPodId() + ", data center id=" + cluster.getDataCenterId()); return null; } - for (HostVO h : hosts) { - long cluster_id = h.getClusterId(); + for (HostVO host : hosts) { + long cluster_id = host.getClusterId(); ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio"); ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio"); Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); - if (_capacityMgr.checkIfHostHasCapacity(h.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { - logger.debug("Find host " + h.getId() + " has enough capacity"); - DataCenter dc = 
_dcDao.findById(h.getDataCenterId()); - Pod pod = _podDao.findById(h.getPodId()); - return new DeployDestination(dc, pod, cluster, h); + if (_capacityMgr.checkIfHostHasCapacity(host, cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { + logger.debug(String.format("Find host %s has enough capacity", host)); + DataCenter dc = _dcDao.findById(host.getDataCenterId()); + Pod pod = _podDao.findById(host.getPodId()); + return new DeployDestination(dc, pod, cluster, host); } } } diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java index bf991b77e1c..d90ea6c3731 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java @@ -98,10 +98,10 @@ public class BaremetalManagerImpl extends ManagerBase implements BaremetalManage if (newState == State.Starting) { host.setDetail("vmName", vo.getInstanceName()); - logger.debug("Add vmName " + host.getDetail("vmName") + " to host " + host.getId() + " details"); + logger.debug(String.format("Add vmName %s to host %s details", host.getDetail("vmName"), host)); } else { if (host.getDetail("vmName") != null && host.getDetail("vmName").equalsIgnoreCase(vo.getInstanceName())) { - logger.debug("Remove vmName " + host.getDetail("vmName") + " from host " + host.getId() + " details"); + logger.debug(String.format("Remove vmName %s from host %s details", host.getDetail("vmName"), host)); host.getDetails().remove("vmName"); } } @@ -142,13 +142,13 @@ public class BaremetalManagerImpl extends ManagerBase implements BaremetalManage } if (State.Starting != vm.getState()) { - throw new CloudRuntimeException(String.format("baremetal instance[name:%s, state:%s] is not in state of Starting", vmName, 
vm.getState())); + throw new CloudRuntimeException(String.format("baremetal instance %s [state:%s] is not in state of Starting", vm, vm.getState())); } vm.setState(State.Running); vm.setLastHostId(vm.getHostId()); vmDao.update(vm.getId(), vm); - logger.debug(String.format("received baremetal provision done notification for vm[id:%s name:%s] running on host[mac:%s, ip:%s]", - vm.getId(), vm.getInstanceName(), host.getPrivateMacAddress(), host.getPrivateIpAddress())); + logger.debug(String.format("received baremetal provision done notification for vm %s running on host %s [mac:%s, ip:%s]", + vm, host, host.getPrivateMacAddress(), host.getPrivateIpAddress())); } } diff --git a/plugins/hypervisors/hyperv/src/main/java/com/cloud/ha/HypervInvestigator.java b/plugins/hypervisors/hyperv/src/main/java/com/cloud/ha/HypervInvestigator.java index d820fd5b6d3..3d79b9efdd1 100644 --- a/plugins/hypervisors/hyperv/src/main/java/com/cloud/ha/HypervInvestigator.java +++ b/plugins/hypervisors/hyperv/src/main/java/com/cloud/ha/HypervInvestigator.java @@ -66,7 +66,7 @@ public class HypervInvestigator extends AdapterBase implements Investigator { return answer.getResult() ? 
Status.Down : Status.Up; } } catch (Exception e) { - logger.debug("Failed to send command to host: " + neighbor.getId(), e); + logger.debug(String.format("Failed to send command to host: %s", neighbor), e); } } diff --git a/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java b/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java index 283f4dc0c96..a5947238bf6 100644 --- a/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java +++ b/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java @@ -136,7 +136,7 @@ public class HypervServerDiscoverer extends DiscovererBase implements Discoverer } if (logger.isDebugEnabled()) { - logger.debug("Setting up host " + agentId); + logger.debug(String.format("Setting up host %s", agent)); } HostEnvironment env = new HostEnvironment(); @@ -161,14 +161,14 @@ public class HypervServerDiscoverer extends DiscovererBase implements Discoverer if (reason == null) { reason = " details were null"; } - logger.warn("Unable to setup agent " + agentId + " due to " + reason); + logger.warn(String.format("Unable to setup agent %s due to %s", agent, reason)); } // Error handling borrowed from XcpServerDiscoverer, may need to be // updated. 
} catch (AgentUnavailableException e) { - logger.warn("Unable to setup agent " + agentId + " because it became unavailable.", e); + logger.warn(String.format("Unable to setup agent %s because it became unavailable.", agent), e); } catch (OperationTimedoutException e) { - logger.warn("Unable to setup agent " + agentId + " because it timed out", e); + logger.warn(String.format("Unable to setup agent %s because it timed out", agent), e); } throw new ConnectionException(true, "Reinitialize agent after setup."); } @@ -256,7 +256,7 @@ public class HypervServerDiscoverer extends DiscovererBase implements Discoverer } logger.info("Creating" + HypervDirectConnectResource.class.getName() + " HypervDirectConnectResource for zone/pod/cluster " + dcId + "/" + podId + "/" + - clusterId); + cluster); // Some Hypervisors organise themselves in pools. // The startup command tells us what pool they are using. @@ -391,7 +391,7 @@ public class HypervServerDiscoverer extends DiscovererBase implements Discoverer return null; } - logger.info("Host: " + host.getName() + " connected with hypervisor type: " + HypervisorType.Hyperv + ". Checking CIDR..."); + logger.info(String.format("Host: %s connected with hypervisor type: %s. 
Checking CIDR...", host, HypervisorType.Hyperv)); HostPodVO pod = _podDao.findById(host.getPodId()); DataCenterVO dc = _dcDao.findById(host.getDataCenterId()); diff --git a/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/guru/HypervGuru.java b/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/guru/HypervGuru.java index c00ee70bf13..d488ee2058f 100644 --- a/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/guru/HypervGuru.java +++ b/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/guru/HypervGuru.java @@ -123,7 +123,7 @@ public class HypervGuru extends HypervisorGuruBase implements HypervisorGuru { String mac = networkModel.getNextAvailableMacAddressInNetwork(networkId); nicTo.setMac(mac); } catch (InsufficientAddressCapacityException e) { - throw new CloudRuntimeException("unable to allocate mac address on network: " + networkId); + throw new CloudRuntimeException(String.format("unable to allocate mac address on network: %s", network.getUuid())); } nicTo.setDns1(profile.getIPv4Dns1()); nicTo.setDns2(profile.getIPv4Dns2()); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/ha/KVMInvestigator.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/ha/KVMInvestigator.java index 8fc74826242..eb64f4bc439 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/ha/KVMInvestigator.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/ha/KVMInvestigator.java @@ -60,7 +60,7 @@ public class KVMInvestigator extends AdapterBase implements Investigator { return haManager.isVMAliveOnHost(host); } Status status = isAgentAlive(host); - logger.debug("HA: HOST is ineligible legacy state " + status + " for host " + host.getId()); + logger.debug("HA: HOST is ineligible legacy state {} for host {}", status, host); if (status == null) { throw new UnknownVM(); } @@ -88,8 +88,7 @@ public class KVMInvestigator extends AdapterBase implements Investigator { storageSupportHA = 
storageSupportHa(zonePools); } if (!storageSupportHA) { - logger.warn( - "Agent investigation was requested on host " + agent + ", but host does not support investigation because it has no NFS storage. Skipping investigation."); + logger.warn("Agent investigation was requested on host {}, but host does not support investigation because it has no NFS storage. Skipping investigation.", agent); return Status.Disconnected; } @@ -104,7 +103,7 @@ public class KVMInvestigator extends AdapterBase implements Investigator { hostStatus = answer.getResult() ? Status.Down : Status.Up; } } catch (Exception e) { - logger.debug("Failed to send command to host: " + agent.getId()); + logger.debug("Failed to send command to host: {}", agent); } if (hostStatus == null) { hostStatus = Status.Disconnected; @@ -116,18 +115,18 @@ public class KVMInvestigator extends AdapterBase implements Investigator { || (neighbor.getHypervisorType() != Hypervisor.HypervisorType.KVM && neighbor.getHypervisorType() != Hypervisor.HypervisorType.LXC)) { continue; } - logger.debug("Investigating host:" + agent.getId() + " via neighbouring host:" + neighbor.getId()); + logger.debug("Investigating host:{} via neighbouring host:{}", agent, neighbor); try { Answer answer = _agentMgr.easySend(neighbor.getId(), cmd); if (answer != null) { neighbourStatus = answer.getResult() ? 
Status.Down : Status.Up; - logger.debug("Neighbouring host:" + neighbor.getId() + " returned status:" + neighbourStatus + " for the investigated host:" + agent.getId()); + logger.debug("Neighbouring host:{} returned status:{} for the investigated host:{}", neighbor, neighbourStatus, agent); if (neighbourStatus == Status.Up) { break; } } } catch (Exception e) { - logger.debug("Failed to send command to host: " + neighbor.getId()); + logger.debug("Failed to send command to host: {}", neighbor); } } if (neighbourStatus == Status.Up && (hostStatus == Status.Disconnected || hostStatus == Status.Down)) { @@ -136,7 +135,7 @@ public class KVMInvestigator extends AdapterBase implements Investigator { if (neighbourStatus == Status.Down && (hostStatus == Status.Disconnected || hostStatus == Status.Down)) { hostStatus = Status.Down; } - logger.debug("HA: HOST is ineligible legacy state " + hostStatus + " for host " + agent.getId()); + logger.debug("HA: HOST is ineligible legacy state {} for host {}", hostStatus, agent); return hostStatus; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index fe6be10f9b8..92e4570170e 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -278,13 +278,13 @@ public class KVMStorageProcessor implements StorageProcessor { String path = derivePath(primaryStore, destData, details); if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { - logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + logger.warn("Failed to connect physical disk at path: {}, in storage pool [id: {}, name: {}]", path, primaryStore.getUuid(), 
primaryStore.getName()); } primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, path != null ? path : destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds()); if (!storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path)) { - logger.warn("Failed to disconnect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + logger.warn("Failed to disconnect physical disk at path: {}, in storage pool [id: {}, name: {}]", path, primaryStore.getUuid(), primaryStore.getName()); } } else { primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, UUID.randomUUID().toString(), primaryPool, cmd.getWaitInMillSeconds()); @@ -427,17 +427,19 @@ public class KVMStorageProcessor implements StorageProcessor { String path = derivePath(primaryStore, destData, details); if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), templatePath, details)) { - logger.warn("Failed to connect base template volume at path: " + templatePath + ", in storage pool id: " + primaryStore.getUuid()); + logger.warn("Failed to connect base template volume [id: {}, name: {}, path:" + + " {}], in storage pool [id: {}, name: {}]", template.getUuid(), + template.getName(), templatePath, primaryStore.getUuid(), primaryStore.getName()); } BaseVol = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), templatePath); if (BaseVol == null) { - logger.debug("Failed to get the physical disk for base template volume at path: " + templatePath); - throw new CloudRuntimeException("Failed to get the physical disk for base template volume at path: " + templatePath); + logger.debug("Failed to get the physical disk for base template volume [id: {}, name: {}, path: {}]", template.getUuid(), template.getName(), templatePath); + throw new CloudRuntimeException(String.format("Failed to get the physical disk for base template volume [id: %s, name: %s, path: %s]", template.getUuid(), 
template.getName(), templatePath)); } if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { - logger.warn("Failed to connect new volume at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + logger.warn("Failed to connect new volume at path: {}, in storage pool [id: {}, name: {}]", path, primaryStore.getUuid(), primaryStore.getName()); } BaseVol.setDispName(template.getName()); @@ -1096,10 +1098,10 @@ public class KVMStorageProcessor implements StorageProcessor { try { Files.deleteIfExists(Paths.get(snapshotPath)); } catch (IOException ex) { - logger.error(String.format("Failed to delete snapshot [%s] on primary storage [%s].", snapshotPath, primaryPool.getUuid()), ex); + logger.error("Failed to delete snapshot [{}] on primary storage [{}].", snapshot.getId(), snapshot.getName(), ex); } } else { - logger.debug(String.format("This backup is temporary, not deleting snapshot [%s] on primary storage [%s]", snapshotPath, primaryPool.getUuid())); + logger.debug("This backup is temporary, not deleting snapshot [{}] on primary storage [{}]", snapshot.getId(), snapshot.getName()); } } @@ -1551,14 +1553,15 @@ public class KVMStorageProcessor implements StorageProcessor { return new AttachAnswer(disk); } catch (final LibvirtException e) { - logger.debug("Failed to attach volume: " + vol.getPath() + ", due to ", e); + logger.debug(String.format("Failed to attach volume [id: %d, uuid: %s, name: %s, path: %s], due to ", + vol.getId(), vol.getUuid(), vol.getName(), vol.getPath()), e); storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath()); return new AttachAnswer(e.toString()); } catch (final InternalErrorException e) { - logger.debug("Failed to attach volume: " + vol.getPath() + ", due to ", e); + logger.debug(String.format("Failed to attach volume [id: %d, uuid: %s, name: %s, path: %s], due to ", vol.getId(), vol.getUuid(), vol.getName(), vol.getPath()), 
e); return new AttachAnswer(e.toString()); } catch (final CloudRuntimeException e) { - logger.debug("Failed to attach volume: " + vol.getPath() + ", due to ", e); + logger.debug(String.format("Failed to attach volume: [id: %d, uuid: %s, name: %s, path: %s], due to ", vol.getId(), vol.getUuid(), vol.getName(), vol.getPath()), e); return new AttachAnswer(e.toString()); } finally { vol.clearPassphrase(); @@ -1588,14 +1591,8 @@ public class KVMStorageProcessor implements StorageProcessor { storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath()); return new DettachAnswer(disk); - } catch (final LibvirtException e) { - logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e); - return new DettachAnswer(e.toString()); - } catch (final InternalErrorException e) { - logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e); - return new DettachAnswer(e.toString()); - } catch (final CloudRuntimeException e) { - logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e); + } catch (final LibvirtException | InternalErrorException | CloudRuntimeException e) { + logger.debug(String.format("Failed to detach volume [id: %d, uuid: %s, name: %s, path: %s], due to ", vol.getId(), vol.getUuid(), vol.getName(), vol.getPath()), e); return new DettachAnswer(e.toString()); } finally { vol.clearPassphrase(); @@ -2134,7 +2131,7 @@ public class KVMStorageProcessor implements StorageProcessor { try { pool.getPhysicalDisk(vol.getPath()); } catch (final Exception e) { - logger.debug("can't find volume: " + vol.getPath() + ", return true"); + logger.debug(String.format("can't find volume: %s, return true", vol)); return new Answer(null); } pool.deletePhysicalDisk(vol.getPath(), vol.getFormat()); @@ -2429,7 +2426,7 @@ public class KVMStorageProcessor implements StorageProcessor { logger.debug("Checking for free space on the host for downloading the template with physical size: " + templateSize + " 
and virtual size: " + cmd.getTemplateSize()); if (!isEnoughSpaceForDownloadTemplateOnTemporaryLocation(templateSize)) { - String msg = "Not enough space on the defined temporary location to download the template " + cmd.getTemplateId(); + String msg = String.format("Not enough space on the defined temporary location to download the template %s with id %d", cmd.getDestData(), cmd.getTemplateId()); logger.error(msg); return new DirectDownloadAnswer(false, msg, true); } @@ -2452,7 +2449,8 @@ public class KVMStorageProcessor implements StorageProcessor { String destTemplatePath = (destTemplate != null) ? destTemplate.getPath() : null; if (!storagePoolMgr.connectPhysicalDisk(pool.getPoolType(), pool.getUuid(), destTemplatePath, null)) { - logger.warn("Unable to connect physical disk at path: " + destTemplatePath + ", in storage pool id: " + pool.getUuid()); + logger.warn(String.format("Unable to connect physical disk at path: %s, in storage pool [id: %d, uuid: %s, name: %s, path: %s]", + destTemplatePath, pool.getId(), pool.getUuid(), pool.getName(), pool.getPath())); } template = storagePoolMgr.createPhysicalDiskFromDirectDownloadTemplate(tempFilePath, destTemplatePath, destPool, cmd.getFormat(), cmd.getWaitInMillSeconds()); @@ -2465,7 +2463,7 @@ public class KVMStorageProcessor implements StorageProcessor { try { Files.deleteIfExists(Path.of(templatePath)); } catch (IOException ioException) { - logger.warn("Unable to remove file [{}]; consider removing it manually.", templatePath, ioException); + logger.warn("Unable to remove file [name: {}, path: {}]; consider removing it manually.", template.getName(), templatePath, ioException); } logger.error("The downloaded file [{}] is not a valid QCOW2.", templatePath, e); @@ -2474,10 +2472,10 @@ public class KVMStorageProcessor implements StorageProcessor { } if (!storagePoolMgr.disconnectPhysicalDisk(pool.getPoolType(), pool.getUuid(), destTemplatePath)) { - logger.warn("Unable to disconnect physical disk at path: " + 
destTemplatePath + ", in storage pool id: " + pool.getUuid()); + logger.warn(String.format("Unable to disconnect physical disk at path: %s, in storage pool [id: %d, uuid: %s, name: %s, path: %s]", destTemplatePath, pool.getId(), pool.getUuid(), pool.getName(), pool.getUuid())); } } catch (CloudRuntimeException e) { - logger.warn("Error downloading template " + cmd.getTemplateId() + " due to: " + e.getMessage()); + logger.warn(String.format("Error downloading template %s with id %d due to: %s", cmd.getDestData(), cmd.getTemplateId(), e.getMessage())); return new DirectDownloadAnswer(false, "Unable to download template: " + e.getMessage(), true); } catch (IllegalArgumentException e) { return new DirectDownloadAnswer(false, "Unable to create direct downloader: " + e.getMessage(), true); @@ -2503,18 +2501,25 @@ public class KVMStorageProcessor implements StorageProcessor { KVMStoragePool destPool = null; try { - logger.debug("Copying src volume (id: " + srcVol.getId() + ", format: " + srcFormat + ", path: " + srcVolumePath + ", primary storage: [id: " + srcPrimaryStore.getId() + ", type: " + srcPrimaryStore.getPoolType() + "]) to dest volume (id: " + - destVol.getId() + ", format: " + destFormat + ", path: " + destVolumePath + ", primary storage: [id: " + destPrimaryStore.getId() + ", type: " + destPrimaryStore.getPoolType() + "])."); + logger.debug(String.format("Copying src volume (id: %d, uuid: %s, name: %s, format:" + + " %s, path: %s, primary storage: [id: %d, uuid: %s, name: %s, type: " + + "%s]) to dest volume (id: %d, uuid: %s, name: %s, format: %s, path: " + + "%s, primary storage: [id: %d, uuid: %s, name: %s, type: %s]).", + srcVol.getId(), srcVol.getUuid(), srcVol.getName(), srcFormat, srcVolumePath, + srcPrimaryStore.getId(), srcPrimaryStore.getUuid(), srcPrimaryStore.getName(), + srcPrimaryStore.getPoolType(), destVol.getId(), destVol.getUuid(), destVol.getName(), + destFormat, destVolumePath, destPrimaryStore.getId(), destPrimaryStore.getUuid(), + 
destPrimaryStore.getName(), destPrimaryStore.getPoolType())); if (srcPrimaryStore.isManaged()) { if (!storagePoolMgr.connectPhysicalDisk(srcPrimaryStore.getPoolType(), srcPrimaryStore.getUuid(), srcVolumePath, srcPrimaryStore.getDetails())) { - logger.warn("Failed to connect src volume at path: " + srcVolumePath + ", in storage pool id: " + srcPrimaryStore.getUuid()); + logger.warn(String.format("Failed to connect src volume %s, in storage pool %s", srcVol, srcPrimaryStore)); } } final KVMPhysicalDisk volume = storagePoolMgr.getPhysicalDisk(srcPrimaryStore.getPoolType(), srcPrimaryStore.getUuid(), srcVolumePath); if (volume == null) { - logger.debug("Failed to get physical disk for volume: " + srcVolumePath); + logger.debug("Failed to get physical disk for volume: " + srcVol); throw new CloudRuntimeException("Failed to get physical disk for volume at path: " + srcVolumePath); } @@ -2525,7 +2530,7 @@ public class KVMStorageProcessor implements StorageProcessor { String destVolumeName = null; if (destPrimaryStore.isManaged()) { if (!storagePoolMgr.connectPhysicalDisk(destPrimaryStore.getPoolType(), destPrimaryStore.getUuid(), destVolumePath, destPrimaryStore.getDetails())) { - logger.warn("Failed to connect dest volume at path: " + destVolumePath + ", in storage pool id: " + destPrimaryStore.getUuid()); + logger.warn("Failed to connect dest volume {}, in storage pool {}", destVol, destPrimaryStore); } destVolumeName = derivePath(destPrimaryStore, destData, destPrimaryStore.getDetails()); } else { @@ -2544,7 +2549,8 @@ public class KVMStorageProcessor implements StorageProcessor { storagePoolMgr.copyPhysicalDisk(volume, destVolumeName, destPool, cmd.getWaitInMillSeconds()); } } catch (Exception e) { // Any exceptions while copying the disk, should send failed answer with the error message - String errMsg = String.format("Failed to copy volume: %s to dest storage: %s, due to %s", srcVol.getName(), destPrimaryStore.getName(), e.toString()); + String errMsg = 
String.format("Failed to copy volume [uuid: %s, name: %s] to dest storage [id: %s, name: %s], due to %s", + srcVol.getUuid(), srcVol.getName(), destPrimaryStore.getUuid(), destPrimaryStore.getName(), e.toString()); logger.debug(errMsg, e); throw new CloudRuntimeException(errMsg); } finally { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java index 52adc59cbe7..560020cad38 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java @@ -20,14 +20,13 @@ import java.io.File; import java.util.List; import java.util.Map; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.joda.time.Duration; import org.libvirt.StoragePool; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; -import org.apache.commons.lang3.builder.ToStringBuilder; -import org.apache.commons.lang3.builder.ToStringStyle; import com.cloud.agent.api.to.HostTO; import com.cloud.agent.properties.AgentProperties; @@ -328,7 +327,7 @@ public class LibvirtStoragePool implements KVMStoragePool { @Override public String toString() { - return new ToStringBuilder(this, ToStringStyle.JSON_STYLE).append("uuid", getUuid()).append("path", getLocalPath()).toString(); + return String.format("LibvirtStoragePool %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "uuid", "path")); } @Override diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java index 19d8378eb78..0cf8ce0018d 100644 --- 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java @@ -38,6 +38,7 @@ import com.cloud.utils.PropertiesUtil; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -604,7 +605,9 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { } public String toString() { - return String.format("type=%s; address=%s; connid=%s", getType(), getAddress(), getConnectionId()); + return String.format("AddressInfo %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "type", "address", "connectionId")); } } diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java index 81daabf59d7..b937be5265b 100644 --- a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java @@ -73,12 +73,12 @@ public final class KVMHAProvider extends HAAbstractHostProvider implements HAPro final OutOfBandManagementResponse resp = outOfBandManagementService.executePowerOperation(r, PowerOperation.RESET, null); return resp.getSuccess(); } else { - logger.warn("OOBM recover operation failed for the host " + r.getName()); + logger.warn("OOBM recover operation failed for the host {}", r); return false; } } catch (Exception e){ - logger.warn("OOBM service is not configured or enabled for this host " + r.getName() + " error is " + e.getMessage()); - throw new 
HARecoveryException(" OOBM service is not configured or enabled for this host " + r.getName(), e); + logger.warn("OOBM service is not configured or enabled for this host {} error is {}", r, e.getMessage()); + throw new HARecoveryException(String.format(" OOBM service is not configured or enabled for this host %s", r), e); } } @@ -90,12 +90,12 @@ public final class KVMHAProvider extends HAAbstractHostProvider implements HAPro final OutOfBandManagementResponse resp = outOfBandManagementService.executePowerOperation(r, PowerOperation.OFF, null); return resp.getSuccess(); } else { - logger.warn("OOBM fence operation failed for this host " + r.getName()); + logger.warn("OOBM fence operation failed for this host {}", r); return false; } } catch (Exception e){ - logger.warn("OOBM service is not configured or enabled for this host " + r.getName() + " error is " + e.getMessage()); - throw new HAFenceException("OBM service is not configured or enabled for this host " + r.getName() , e); + logger.warn("OOBM service is not configured or enabled for this host {} error is {}", r, e.getMessage()); + throw new HAFenceException(String.format("OBM service is not configured or enabled for this host %s", r.getName()), e); } } diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java index 10d684bbdd3..31f87d7e044 100644 --- a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java @@ -21,6 +21,7 @@ import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CheckOnHostCommand; import com.cloud.agent.api.CheckVMActivityOnStoragePoolCommand; +import com.cloud.dc.dao.ClusterDao; import com.cloud.exception.StorageUnavailableException; import 
com.cloud.ha.HighAvailabilityManager; import com.cloud.host.Host; @@ -51,6 +52,8 @@ import java.util.List; public class KVMHostActivityChecker extends AdapterBase implements ActivityCheckerInterface, HealthCheckerInterface { + @Inject + private ClusterDao clusterDao; @Inject private VolumeDao volumeDao; @Inject @@ -67,7 +70,7 @@ public class KVMHostActivityChecker extends AdapterBase implements ActivityCheck @Override public boolean isActive(Host r, DateTime suspectTime) throws HACheckerException { try { - return isVMActivtyOnHost(r, suspectTime); + return isVMActivityOnHost(r, suspectTime); } catch (HACheckerException e) { //Re-throwing the exception to avoid poluting the 'HACheckerException' already thrown throw e; @@ -146,7 +149,7 @@ public class KVMHostActivityChecker extends AdapterBase implements ActivityCheck return hostStatus == Status.Up; } - private boolean isVMActivtyOnHost(Host agent, DateTime suspectTime) throws HACheckerException { + private boolean isVMActivityOnHost(Host agent, DateTime suspectTime) throws HACheckerException { if (agent.getHypervisorType() != Hypervisor.HypervisorType.KVM && agent.getHypervisorType() != Hypervisor.HypervisorType.LXC) { throw new IllegalStateException(String.format("Calling KVM investigator for non KVM Host of type [%s].", agent.getHypervisorType())); } @@ -155,7 +158,7 @@ public class KVMHostActivityChecker extends AdapterBase implements ActivityCheck for (StoragePool pool : poolVolMap.keySet()) { activityStatus = verifyActivityOfStorageOnHost(poolVolMap, pool, agent, suspectTime, activityStatus); if (!activityStatus) { - logger.warn(String.format("It seems that the storage pool [%s] does not have activity on %s.", pool.getId(), agent.toString())); + logger.warn("It seems that the storage pool [{}] does not have activity on {}.", pool, agent); break; } } @@ -167,20 +170,20 @@ public class KVMHostActivityChecker extends AdapterBase implements ActivityCheck List volume_list = poolVolMap.get(pool); final 
CheckVMActivityOnStoragePoolCommand cmd = new CheckVMActivityOnStoragePoolCommand(agent, pool, volume_list, suspectTime); - logger.debug(String.format("Checking VM activity for %s on storage pool [%s].", agent.toString(), pool.getId())); + logger.debug("Checking VM activity for {} on storage pool [{}].", agent.toString(), pool); try { Answer answer = storageManager.sendToPool(pool, getNeighbors(agent), cmd); if (answer != null) { activityStatus = !answer.getResult(); - logger.debug(String.format("%s %s activity on storage pool [%s]", agent.toString(), activityStatus ? "has" : "does not have", pool.getId())); + logger.debug("{} {} activity on storage pool [{}]", agent.toString(), activityStatus ? "has" : "does not have", pool); } else { - String message = String.format("Did not get a valid response for VM activity check for %s on storage pool [%s].", agent.toString(), pool.getId()); + String message = String.format("Did not get a valid response for VM activity check for %s on storage pool [%s].", agent.toString(), pool); logger.debug(message); throw new IllegalStateException(message); } } catch (StorageUnavailableException e){ - String message = String.format("Storage [%s] is unavailable to do the check, probably the %s is not reachable.", pool.getId(), agent.toString()); + String message = String.format("Storage [%s] is unavailable to do the check, probably the %s is not reachable.", pool, agent); logger.warn(message, e); throw new HACheckerException(message, e); } @@ -191,15 +194,15 @@ public class KVMHostActivityChecker extends AdapterBase implements ActivityCheck List vm_list = vmInstanceDao.listByHostId(agent.getId()); List volume_list = new ArrayList(); for (VirtualMachine vm : vm_list) { - logger.debug(String.format("Retrieving volumes of VM [%s]...", vm.getId())); + logger.debug("Retrieving volumes of VM [{}]...", vm); List vm_volume_list = volumeDao.findByInstance(vm.getId()); volume_list.addAll(vm_volume_list); } HashMap> poolVolMap = new HashMap>(); for 
(Volume vol : volume_list) { - logger.debug(String.format("Retrieving storage pool [%s] of volume [%s]...", vol.getPoolId(), vol.getId())); StoragePool sp = storagePool.findById(vol.getPoolId()); + logger.debug("Retrieving storage pool [{}] of volume [{}]...", sp, vol); if (!poolVolMap.containsKey(sp)) { List list = new ArrayList(); list.add(vol); @@ -215,7 +218,7 @@ public class KVMHostActivityChecker extends AdapterBase implements ActivityCheck public long[] getNeighbors(Host agent) { List neighbors = new ArrayList(); List cluster_hosts = resourceManager.listHostsInClusterByStatus(agent.getClusterId(), Status.Up); - logger.debug(String.format("Retrieving all \"Up\" hosts from cluster [%s]...", agent.getClusterId())); + logger.debug("Retrieving all \"Up\" hosts from cluster [{}]...", clusterDao.findById(agent.getClusterId())); for (HostVO host : cluster_hosts) { if (host.getId() == agent.getId() || (host.getHypervisorType() != Hypervisor.HypervisorType.KVM && host.getHypervisorType() != Hypervisor.HypervisorType.LXC)) { continue; diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/ha/SimulatorInvestigator.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/ha/SimulatorInvestigator.java index 56a5b08810b..7114a841157 100644 --- a/plugins/hypervisors/simulator/src/main/java/com/cloud/ha/SimulatorInvestigator.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/ha/SimulatorInvestigator.java @@ -75,7 +75,7 @@ public class SimulatorInvestigator extends AdapterBase implements Investigator { return answer.getResult() ? 
Status.Up : Status.Down; } } catch (Exception e) { - logger.debug("Failed to send command to host: " + neighbor.getId()); + logger.debug(String.format("Failed to send command to host: %s", neighbor)); } } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VmwareVmImplementer.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VmwareVmImplementer.java index 461e141fa3d..3885e06e740 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VmwareVmImplementer.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VmwareVmImplementer.java @@ -242,7 +242,7 @@ class VmwareVmImplementer { String mac = networkModel.getNextAvailableMacAddressInNetwork(networkId); nicTo.setMac(mac); } catch (InsufficientAddressCapacityException e) { - throw new CloudRuntimeException("unable to allocate mac address on network: " + networkId); + throw new CloudRuntimeException(String.format("unable to allocate mac address on network %s with id %d", network, networkId)); } nicTo.setDns1(publicNicProfile.getIPv4Dns1()); nicTo.setDns2(publicNicProfile.getIPv4Dns2()); diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java index 580d44a09d6..1be381dcd54 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java @@ -176,7 +176,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer if (hosts != null && hosts.size() > 0) { int maxHostsPerCluster = _hvCapabilitiesDao.getMaxHostsPerCluster(hosts.get(0).getHypervisorType(), hosts.get(0).getHypervisorVersion()); if (hosts.size() >= maxHostsPerCluster) { - String msg = "VMware cluster " + cluster.getName() + " is too big to add 
new host, current size: " + hosts.size() + ", max. size: " + maxHostsPerCluster; + String msg = String.format("VMware cluster %s is too big to add new host, current size: %d, max. size: %d", cluster, hosts.size(), maxHostsPerCluster); logger.error(msg); throw new DiscoveredWithErrorException(msg); } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java index 1e260b4f99b..c99d7d4d707 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java @@ -493,8 +493,8 @@ public class VmwareStorageProcessor implements StorageProcessor { boolean createSnapshot, String nfsVersion, String configuration) throws Exception { String secondaryMountPoint = mountService.getMountPoint(secondaryStorageUrl, nfsVersion); - logger.info(String.format("Init copy of template [name: %s, path in secondary storage: %s, configuration: %s] in secondary storage [url: %s, mount point: %s] to primary storage.", - templateName, templatePathAtSecondaryStorage, configuration, secondaryStorageUrl, secondaryMountPoint)); + logger.info(String.format("Init copy of template [uuid: %s, name: %s, path in secondary storage: %s, configuration: %s] in secondary storage [url: %s, mount point: %s] to primary storage.", + templateUuid, templateName, templatePathAtSecondaryStorage, configuration, secondaryStorageUrl, secondaryMountPoint)); String srcOVAFileName = VmwareStorageLayoutHelper.getTemplateOnSecStorageFilePath(secondaryMountPoint, templatePathAtSecondaryStorage, templateName, @@ -2534,7 +2534,7 @@ public class VmwareStorageProcessor implements StorageProcessor { if (vmMo != null) { if (logger.isInfoEnabled()) { if (deployAsIs) { - logger.info("Destroying root volume " + vol.getPath() + " of deploy-as-is VM " + 
vmName); + logger.info(String.format("Destroying root volume %s of deploy-as-is VM %s", vol, vmName)); } else { logger.info("Destroy root volume and VM itself. vmName " + vmName); } @@ -2585,7 +2585,7 @@ public class VmwareStorageProcessor implements StorageProcessor { } } else if (deployAsIs) { if (logger.isInfoEnabled()) { - logger.info("Destroying root volume " + vol.getPath() + " of already removed deploy-as-is VM " + vmName); + logger.info(String.format("Destroying root volume %s of already removed deploy-as-is VM %s", vol, vmName)); } // The disks of the deploy-as-is VM have been detached from the VM and moved to root folder String deployAsIsRootDiskPath = dsMo.searchFileInSubFolders(vol.getPath() + VmwareResource.VMDK_EXTENSION, @@ -3859,7 +3859,7 @@ public class VmwareStorageProcessor implements StorageProcessor { String volumePath = volumeTO.getPath(); if (!file.getFileBaseName().equalsIgnoreCase(volumePath)) { if (logger.isInfoEnabled()) { - logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumePath + " -> " + file.getFileBaseName()); + logger.info(String.format("Detected disk-chain top file change on volume: %s -> %s", volumeTO, file.getFileBaseName())); } volumePathChangeObserved = true; volumePath = file.getFileBaseName(); @@ -3871,7 +3871,7 @@ public class VmwareStorageProcessor implements StorageProcessor { if (diskDatastoreMoFromVM != null) { String actualPoolUuid = diskDatastoreMoFromVM.getCustomFieldValue(CustomFieldConstants.CLOUD_UUID); if (!actualPoolUuid.equalsIgnoreCase(primaryStore.getUuid())) { - logger.warn(String.format("Volume %s found to be in a different storage pool %s", volumePath, actualPoolUuid)); + logger.warn(String.format("Volume %s found to be in a different storage pool %s", volumeTO, actualPoolUuid)); datastoreChangeObserved = true; volumeTO.setDataStoreUuid(actualPoolUuid); volumeTO.setChainInfo(_gson.toJson(matchingExistingDisk)); diff --git 
a/plugins/hypervisors/xenserver/src/main/java/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java b/plugins/hypervisors/xenserver/src/main/java/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java index caf28e849a0..76807b89d0f 100644 --- a/plugins/hypervisors/xenserver/src/main/java/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java +++ b/plugins/hypervisors/xenserver/src/main/java/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java @@ -196,8 +196,7 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy { final Answer answer = agentMgr.easySend(destHost.getId(), cmd); if (answer == null || !answer.getResult()) { - String errMsg = "Error interacting with host (related to CreateStoragePoolCommand)" + - (StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : ""); + String errMsg = String.format("Error interacting with host %s (related to CreateStoragePoolCommand)%s", destHost, (answer != null && StringUtils.isNotBlank(answer.getDetails())) ? ": " + answer.getDetails() : ""); logger.error(errMsg); @@ -238,8 +237,7 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy { final Answer answer = agentMgr.easySend(srcHost.getId(), cmd); if (answer == null || !answer.getResult()) { - String errMsg = "Error interacting with host (related to DeleteStoragePoolCommand)" + - (StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : ""); + String errMsg = String.format("Error interacting with host %s (related to DeleteStoragePoolCommand)%s", srcHost, (answer != null && StringUtils.isNotBlank(answer.getDetails())) ? 
": " + answer.getDetails() : ""); logger.error(errMsg); @@ -281,8 +279,8 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy { final Answer answer = agentMgr.easySend(destHost.getId(), cmd); if (answer == null || !answer.getResult()) { - String errMsg = "Error interacting with host (related to handleManagedVolumesAfterFailedMigration)" + - (StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : ""); + String errMsg = String.format("Error interacting with host %s (related to handleManagedVolumesAfterFailedMigration)%s", + destHost, answer != null && (StringUtils.isNotBlank(answer.getDetails())) ? ": " + answer.getDetails() : ""); logger.error(errMsg); @@ -346,7 +344,7 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy { logger.error("Migration with storage of vm " + vm + " to host " + destHost + " failed."); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } else if (!receiveAnswer.getResult()) { - logger.error("Migration with storage of vm " + vm + " failed. Details: " + receiveAnswer.getDetails()); + logger.error(String.format("Migration with storage of vm %s to host %s failed. Details: %s", vm, destHost, receiveAnswer.getDetails())); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } @@ -362,7 +360,7 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy { } else if (!sendAnswer.getResult()) { handleManagedVolumesAfterFailedMigration(volumeToPool, destHost); - logger.error("Migration with storage of vm " + vm + " failed. Details: " + sendAnswer.getDetails()); + logger.error(String.format("Migration with storage of vm %s failed to host %s. 
Details: %s", vm, destHost, sendAnswer.getDetails())); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } @@ -370,10 +368,10 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy { MigrateWithStorageCompleteAnswer answer = (MigrateWithStorageCompleteAnswer)agentMgr.send(destHost.getId(), command); if (answer == null) { - logger.error("Migration with storage of vm " + vm + " failed."); + logger.error(String.format("Migration with storage of vm %s to host %s failed.", vm, destHost)); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } else if (!answer.getResult()) { - logger.error("Migration with storage of vm " + vm + " failed. Details: " + answer.getDetails()); + logger.error(String.format("Migration with storage of vm %s to host %s failed. Details: %s", vm, destHost, answer.getDetails())); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } else { // Update the volume details after migration. @@ -403,10 +401,10 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy { MigrateWithStorageCommand command = new MigrateWithStorageCommand(to, volumeToFilerto); MigrateWithStorageAnswer answer = (MigrateWithStorageAnswer)agentMgr.send(destHost.getId(), command); if (answer == null) { - logger.error("Migration with storage of vm " + vm + " failed."); + logger.error(String.format("Migration with storage of vm %s to host %s failed.", vm, destHost)); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } else if (!answer.getResult()) { - logger.error("Migration with storage of vm " + vm + " failed. Details: " + answer.getDetails()); + logger.error(String.format("Migration with storage of vm %s to host %s failed. 
Details: %s", vm, destHost, answer.getDetails())); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost + ". " + answer.getDetails()); } else { // Update the volume details after migration. diff --git a/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/CloudianConnectorImpl.java b/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/CloudianConnectorImpl.java index 3c1f161dd20..d5725e88a3c 100644 --- a/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/CloudianConnectorImpl.java +++ b/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/CloudianConnectorImpl.java @@ -267,7 +267,7 @@ public class CloudianConnectorImpl extends ComponentLifecycleBase implements Clo try { final Account account = accountDao.findByIdIncludingRemoved((Long) args); if(!removeUserAccount(account)) { - logger.warn(String.format("Failed to remove account to Cloudian while removing CloudStack account=%s, id=%s", account.getAccountName(), account.getId())); + logger.warn("Failed to remove account to Cloudian while removing CloudStack account {}", account); } } catch (final Exception e) { logger.error("Caught exception while removing account in Cloudian: ", e); @@ -281,7 +281,7 @@ public class CloudianConnectorImpl extends ComponentLifecycleBase implements Clo try { final Domain domain = domainDao.findById((Long) args); if (!addGroup(domain)) { - logger.warn(String.format("Failed to add group in Cloudian while adding CloudStack domain=%s id=%s", domain.getPath(), domain.getId())); + logger.warn("Failed to add group in Cloudian while adding CloudStack domain {}", domain); } } catch (final Exception e) { logger.error("Caught exception adding domain/group in Cloudian: ", e); @@ -295,7 +295,7 @@ public class CloudianConnectorImpl extends ComponentLifecycleBase implements Clo try { final DomainVO domain = (DomainVO) args; if (!removeGroup(domain)) { - 
logger.warn(String.format("Failed to remove group in Cloudian while removing CloudStack domain=%s id=%s", domain.getPath(), domain.getId())); + logger.warn("Failed to remove group in Cloudian while removing CloudStack domain {}", domain); } } catch (final Exception e) { logger.error("Caught exception while removing domain/group in Cloudian: ", e); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index 71be8b1a475..477eb257dee 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -316,16 +316,16 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne return false; } if (networkOffering.getState() == NetworkOffering.State.Disabled) { - logger.warn(String.format("Network offering ID: %s is not enabled", networkOffering.getUuid())); + logger.warn("Network offering: {} is not enabled", networkOffering); return false; } List services = networkOfferingServiceMapDao.listServicesForNetworkOffering(networkOffering.getId()); if (services == null || services.isEmpty() || !services.contains("SourceNat")) { - logger.warn(String.format("Network offering ID: %s does not have necessary services to provision Kubernetes cluster", networkOffering.getUuid())); + logger.warn("Network offering: {} does not have necessary services to provision Kubernetes cluster", networkOffering); return false; } if (!networkOffering.isEgressDefaultPolicy()) { - logger.warn(String.format("Network offering ID: %s has egress default policy turned off should be on to provision Kubernetes cluster", networkOffering.getUuid())); + logger.warn("Network offering: {} has egress default 
policy turned off should be on to provision Kubernetes cluster", networkOffering); return false; } boolean offeringAvailableForZone = false; @@ -337,7 +337,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne } } if (!offeringAvailableForZone) { - logger.warn(String.format("Network offering ID: %s is not available for zone ID: %s", networkOffering.getUuid(), zone.getUuid())); + logger.warn("Network offering: {} is not available for zone: {}", networkOffering, zone); return false; } long physicalNetworkId = networkModel.findPhysicalNetworkId(zone.getId(), networkOffering.getTags(), networkOffering.getTrafficType()); @@ -387,15 +387,15 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne Integer startPort = rule.getSourcePortStart(); Integer endPort = rule.getSourcePortEnd(); if (logger.isDebugEnabled()) { - logger.debug(String.format("Validating rule with purpose: %s for network: %s with ports: %d-%d", purpose.toString(), network.getUuid(), startPort, endPort)); + logger.debug("Validating rule with purpose: {} for network: {} with ports: {}-{}", purpose.toString(), network, startPort, endPort); } if (startPort <= KubernetesClusterActionWorker.CLUSTER_API_PORT && KubernetesClusterActionWorker.CLUSTER_API_PORT <= endPort) { - throw new InvalidParameterValueException(String.format("Network ID: %s has conflicting %s rules to provision Kubernetes cluster for API access", network.getUuid(), purpose.toString().toLowerCase())); + throw new InvalidParameterValueException(String.format("Network: %s has conflicting %s rules to provision Kubernetes cluster for API access", network, purpose.toString().toLowerCase())); } int expectedSshStart = KubernetesClusterActionWorker.CLUSTER_NODES_DEFAULT_START_SSH_PORT; int expectedSshEnd = expectedSshStart + clusterTotalNodeCount - 1; if (Math.max(expectedSshStart, startPort) <= Math.min(expectedSshEnd, endPort)) { - throw new 
InvalidParameterValueException(String.format("Network ID: %s has conflicting %s rules to provision Kubernetes cluster for node VM SSH access", network.getUuid(), purpose.toString().toLowerCase())); + throw new InvalidParameterValueException(String.format("Network: %s has conflicting %s rules to provision Kubernetes cluster for node VM SSH access", network, purpose.toString().toLowerCase())); } } } @@ -521,10 +521,10 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); if (logger.isDebugEnabled()) { - logger.debug(String.format("Checking host ID: %s for capacity already reserved %d", hostVO.getUuid(), reserved)); + logger.debug("Checking host: {} for capacity already reserved {}", hostVO, reserved); } - if (capacityManager.checkIfHostHasCapacity(hostVO.getId(), cpu_requested * reserved, ram_requested * reserved, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { - logger.debug("Found host ID == '{}' to have enough capacity, CPU={} RAM={}", hostVO.getUuid(), cpu_requested * reserved, toHumanReadableSize(ram_requested * reserved)); + if (capacityManager.checkIfHostHasCapacity(hostVO, cpu_requested * reserved, ram_requested * reserved, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { + logger.debug("Found host {} to have enough capacity, CPU={} RAM={}", hostVO, cpu_requested * reserved, toHumanReadableSize(ram_requested * reserved)); hostEntry.setValue(new Pair(hostVO, reserved)); suitable_host_found = true; planCluster = cluster; @@ -533,19 +533,19 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne } if (!suitable_host_found) { if (logger.isInfoEnabled()) { - logger.info(String.format("Suitable hosts not found in datacenter ID: %s for node %d with offering ID: %s", zone.getUuid(), i, offering.getUuid())); + logger.info("Suitable hosts 
not found in datacenter: {} for node {} with offering: {}", zone, i, offering); } break; } } if (suitable_host_found) { if (logger.isInfoEnabled()) { - logger.info(String.format("Suitable hosts found in datacenter ID: %s, creating deployment destination", zone.getUuid())); + logger.info("Suitable hosts found in datacenter: {}, creating deployment destination", zone); } return new DeployDestination(zone, null, planCluster, null); } - String msg = String.format("Cannot find enough capacity for Kubernetes cluster(requested cpu=%d memory=%s) with offering ID: %s", - cpu_requested * nodesCount, toHumanReadableSize(ram_requested * nodesCount), offering.getUuid()); + String msg = String.format("Cannot find enough capacity for Kubernetes cluster(requested cpu=%d memory=%s) with offering: %s", + cpu_requested * nodesCount, toHumanReadableSize(ram_requested * nodesCount), offering); logger.warn(msg); throw new InsufficientServerCapacityException(msg, DataCenter.class, zone.getId()); } @@ -872,7 +872,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne PhysicalNetwork physicalNetwork = physicalNetworkDao.findById(physicalNetworkId); if (logger.isInfoEnabled()) { - logger.info(String.format("Creating network for account ID: %s from the network offering ID: %s as part of Kubernetes cluster: %s deployment process", owner.getUuid(), networkOffering.getUuid(), clusterName)); + logger.info("Creating network for account: {} from the network offering: {} as part of Kubernetes cluster: {} deployment process", owner, networkOffering, clusterName); } CallContext networkContext = CallContext.register(CallContext.current(), ApiCommandResourceType.Network); @@ -1146,7 +1146,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne try { return _stateMachine.transitTo(kubernetesCluster, e, null, kubernetesClusterDao); } catch (NoTransitionException nte) { - logger.warn(String.format("Failed to transition state of the Kubernetes 
cluster : %s in state %s on event %s", kubernetesCluster.getName(), kubernetesCluster.getState().toString(), e.toString()), nte); + logger.warn("Failed to transition state of the Kubernetes cluster: {} in state {} on event {}", kubernetesCluster, kubernetesCluster.getState().toString(), e.toString(), nte); return false; } } @@ -1261,7 +1261,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne addKubernetesClusterDetails(cluster, defaultNetwork, cmd); if (logger.isInfoEnabled()) { - logger.info(String.format("Kubernetes cluster name: %s and ID: %s has been created", cluster.getName(), cluster.getUuid())); + logger.info("Kubernetes cluster {} has been created", cluster); } CallContext.current().putContextParameter(KubernetesCluster.class, cluster.getUuid()); return cluster; @@ -1352,19 +1352,18 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne accountManager.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, false, kubernetesCluster); if (kubernetesCluster.getState().equals(KubernetesCluster.State.Running)) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Kubernetes cluster : %s is in running state", kubernetesCluster.getName())); + logger.debug("Kubernetes cluster {} is in running state", kubernetesCluster); } return true; } if (kubernetesCluster.getState().equals(KubernetesCluster.State.Starting)) { - if (logger.isDebugEnabled()) { - logger.debug(String.format("Kubernetes cluster : %s is already in starting state", kubernetesCluster.getName())); - } + if (logger.isDebugEnabled()) + logger.debug("Kubernetes cluster {} is already in starting state", kubernetesCluster); return true; } final DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); if (zone == null) { - logAndThrow(Level.WARN, String.format("Unable to find zone for Kubernetes cluster : %s", kubernetesCluster.getName())); + logAndThrow(Level.WARN, 
String.format("Unable to find zone for Kubernetes cluster %s", kubernetesCluster)); } KubernetesClusterStartWorker startWorker = new KubernetesClusterStartWorker(kubernetesCluster, this); @@ -1425,13 +1424,13 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne accountManager.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, false, kubernetesCluster); if (kubernetesCluster.getState().equals(KubernetesCluster.State.Stopped)) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Kubernetes cluster : %s is already stopped", kubernetesCluster.getName())); + logger.debug("Kubernetes cluster: {} is already stopped", kubernetesCluster); } return true; } if (kubernetesCluster.getState().equals(KubernetesCluster.State.Stopping)) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Kubernetes cluster : %s is getting stopped", kubernetesCluster.getName())); + logger.debug("Kubernetes cluster: {} is getting stopped", kubernetesCluster); } return true; } @@ -1787,20 +1786,20 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne List kubernetesClusters = kubernetesClusterDao.findKubernetesClustersToGarbageCollect(); for (KubernetesCluster kubernetesCluster : kubernetesClusters) { if (logger.isInfoEnabled()) { - logger.info(String.format("Running Kubernetes cluster garbage collector on Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.info("Running Kubernetes cluster garbage collector on Kubernetes cluster: {}", kubernetesCluster); } try { KubernetesClusterDestroyWorker destroyWorker = new KubernetesClusterDestroyWorker(kubernetesCluster, KubernetesClusterManagerImpl.this); destroyWorker = ComponentContext.inject(destroyWorker); if (destroyWorker.destroy()) { if (logger.isInfoEnabled()) { - logger.info(String.format("Garbage collection complete for Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.info("Garbage collection 
complete for Kubernetes cluster: {}", kubernetesCluster); } } else { - logger.warn(String.format("Garbage collection failed for Kubernetes cluster : %s, it will be attempted to garbage collected in next run", kubernetesCluster.getName())); + logger.warn("Garbage collection failed for Kubernetes cluster : {}, it will be attempted to garbage collected in next run", kubernetesCluster); } } catch (CloudRuntimeException e) { - logger.warn(String.format("Failed to destroy Kubernetes cluster : %s during GC", kubernetesCluster.getName()), e); + logger.warn("Failed to destroy Kubernetes cluster : {} during GC", kubernetesCluster, e); // proceed further with rest of the Kubernetes cluster garbage collection } } @@ -1844,14 +1843,14 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne List runningKubernetesClusters = kubernetesClusterDao.findManagedKubernetesClustersInState(KubernetesCluster.State.Running); for (KubernetesCluster kubernetesCluster : runningKubernetesClusters) { if (logger.isInfoEnabled()) { - logger.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.info("Running Kubernetes cluster state scanner on Kubernetes cluster: {}", kubernetesCluster); } try { if (!isClusterVMsInDesiredState(kubernetesCluster, VirtualMachine.State.Running)) { stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.FaultsDetected); } } catch (Exception e) { - logger.warn(String.format("Failed to run Kubernetes cluster Running state scanner on Kubernetes cluster : %s status scanner", kubernetesCluster.getName()), e); + logger.warn("Failed to run Kubernetes cluster Running state scanner on Kubernetes cluster: {} status scanner", kubernetesCluster, e); } } @@ -1859,14 +1858,14 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne List stoppedKubernetesClusters = 
kubernetesClusterDao.findManagedKubernetesClustersInState(KubernetesCluster.State.Stopped); for (KubernetesCluster kubernetesCluster : stoppedKubernetesClusters) { if (logger.isInfoEnabled()) { - logger.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster : %s for state: %s", kubernetesCluster.getName(), KubernetesCluster.State.Stopped.toString())); + logger.info("Running Kubernetes cluster state scanner on Kubernetes cluster: {} for state: {}", kubernetesCluster, KubernetesCluster.State.Stopped.toString()); } try { if (!isClusterVMsInDesiredState(kubernetesCluster, VirtualMachine.State.Stopped)) { stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.FaultsDetected); } } catch (Exception e) { - logger.warn(String.format("Failed to run Kubernetes cluster Stopped state scanner on Kubernetes cluster : %s status scanner", kubernetesCluster.getName()), e); + logger.warn("Failed to run Kubernetes cluster Stopped state scanner on Kubernetes cluster: {} status scanner", kubernetesCluster, e); } } @@ -1874,7 +1873,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne List alertKubernetesClusters = kubernetesClusterDao.findManagedKubernetesClustersInState(KubernetesCluster.State.Alert); for (KubernetesClusterVO kubernetesCluster : alertKubernetesClusters) { if (logger.isInfoEnabled()) { - logger.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster : %s for state: %s", kubernetesCluster.getName(), KubernetesCluster.State.Alert.toString())); + logger.info("Running Kubernetes cluster state scanner on Kubernetes cluster: {} for state: {}", kubernetesCluster, KubernetesCluster.State.Alert.toString()); } try { if (isClusterVMsInDesiredState(kubernetesCluster, VirtualMachine.State.Running)) { @@ -1887,7 +1886,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); 
} } catch (Exception e) { - logger.warn(String.format("Failed to run Kubernetes cluster Alert state scanner on Kubernetes cluster : %s status scanner", kubernetesCluster.getName()), e); + logger.warn("Failed to run Kubernetes cluster Alert state scanner on Kubernetes cluster: {} status scanner", kubernetesCluster, e); } } @@ -1900,7 +1899,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne continue; } if (logger.isInfoEnabled()) { - logger.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster : %s for state: %s", kubernetesCluster.getName(), KubernetesCluster.State.Starting.toString())); + logger.info("Running Kubernetes cluster state scanner on Kubernetes cluster: {} for state: {}", kubernetesCluster, KubernetesCluster.State.Starting.toString()); } try { if (isClusterVMsInDesiredState(kubernetesCluster, VirtualMachine.State.Running)) { @@ -1909,20 +1908,20 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } } catch (Exception e) { - logger.warn(String.format("Failed to run Kubernetes cluster Starting state scanner on Kubernetes cluster : %s status scanner", kubernetesCluster.getName()), e); + logger.warn("Failed to run Kubernetes cluster Starting state scanner on Kubernetes cluster: {} status scanner", kubernetesCluster, e); } } List destroyingKubernetesClusters = kubernetesClusterDao.findManagedKubernetesClustersInState(KubernetesCluster.State.Destroying); for (KubernetesCluster kubernetesCluster : destroyingKubernetesClusters) { if (logger.isInfoEnabled()) { - logger.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster : %s for state: %s", kubernetesCluster.getName(), KubernetesCluster.State.Destroying.toString())); + logger.info("Running Kubernetes cluster state scanner on Kubernetes cluster: {} for state: {}", kubernetesCluster, 
KubernetesCluster.State.Destroying.toString()); } try { KubernetesClusterDestroyWorker destroyWorker = new KubernetesClusterDestroyWorker(kubernetesCluster, KubernetesClusterManagerImpl.this); destroyWorker = ComponentContext.inject(destroyWorker); destroyWorker.destroy(); } catch (Exception e) { - logger.warn(String.format("Failed to run Kubernetes cluster Destroying state scanner on Kubernetes cluster : %s status scanner", kubernetesCluster.getName()), e); + logger.warn("Failed to run Kubernetes cluster Destroying state scanner on Kubernetes cluster : {} status scanner", kubernetesCluster, e); } } } @@ -1940,8 +1939,8 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne // check cluster is running at desired capacity include control nodes as well if (clusterVMs.size() < kubernetesCluster.getTotalNodeCount()) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Found only %d VMs in the Kubernetes cluster %s while expected %d VMs to be in state: %s", - clusterVMs.size(), kubernetesCluster.getName(), kubernetesCluster.getTotalNodeCount(), state.toString())); + logger.debug("Found only {} VMs in the Kubernetes cluster {} while expected {} VMs to be in state: {}", + clusterVMs.size(), kubernetesCluster, kubernetesCluster.getTotalNodeCount(), state.toString()); } return false; } @@ -1950,8 +1949,9 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne VMInstanceVO vm = vmInstanceDao.findByIdIncludingRemoved(clusterVm.getVmId()); if (vm.getState() != state) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Found VM : %s in the Kubernetes cluster : %s in state: %s while expected to be in state: %s. So moving the cluster to Alert state for reconciliation", - vm.getUuid(), kubernetesCluster.getName(), vm.getState().toString(), state.toString())); + logger.debug("Found VM: {} in the Kubernetes cluster {} in state: {} while " + + "expected to be in state: {}. 
So moving the cluster to Alert state for reconciliation", + vm, kubernetesCluster, vm.getState().toString(), state.toString()); } return false; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java index 270916aab7e..01268f42111 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java @@ -29,6 +29,7 @@ import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "kubernetes_cluster") @@ -402,6 +403,13 @@ public class KubernetesClusterVO implements KubernetesCluster { this.maxSize = maxSize; } + @Override + public String toString() { + return String.format("KubernetesCluster %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } + @Override public Class getEntityType() { return KubernetesCluster.class; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesServiceHelperImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesServiceHelperImpl.java index efaec61b052..bf49c2abb8d 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesServiceHelperImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesServiceHelperImpl.java @@ -92,8 +92,8 @@ public class KubernetesServiceHelperImpl extends AdapterBase implements Kubernet if (vmMapVO == null) { return; } - logger.error(String.format("VM ID: %s is a 
part of Kubernetes cluster ID: %d", userVm.getId(), vmMapVO.getClusterId())); KubernetesCluster kubernetesCluster = kubernetesClusterDao.findById(vmMapVO.getClusterId()); + logger.error("VM {} is a part of Kubernetes cluster {} with ID: {}", userVm, kubernetesCluster, vmMapVO.getClusterId()); String msg = "Instance is a part of a Kubernetes cluster"; if (kubernetesCluster != null) { if (KubernetesCluster.ClusterType.ExternalManaged.equals(kubernetesCluster.getClusterType())) { diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index 743962a1f00..076bd105728 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -480,7 +480,7 @@ public class KubernetesClusterActionWorker { try { templateService.attachIso(iso.getId(), vm.getId(), true); if (logger.isInfoEnabled()) { - logger.info(String.format("Attached binaries ISO for VM : %s in cluster: %s", vm.getDisplayName(), kubernetesCluster.getName())); + logger.info("Attached binaries ISO for VM: {} in cluster: {}", vm, kubernetesCluster); } } catch (CloudRuntimeException ex) { logTransitStateAndThrow(Level.ERROR, String.format("Failed to attach binaries ISO for VM : %s in the Kubernetes cluster name: %s", vm.getDisplayName(), kubernetesCluster.getName()), kubernetesCluster.getId(), failedEvent, ex); @@ -502,17 +502,17 @@ public class KubernetesClusterActionWorker { try { result = templateService.detachIso(vm.getId(), true); } catch (CloudRuntimeException ex) { - logger.warn(String.format("Failed to detach binaries ISO from VM : %s in the Kubernetes cluster : %s ", 
vm.getDisplayName(), kubernetesCluster.getName()), ex); + logger.warn("Failed to detach binaries ISO from VM: {} in the Kubernetes cluster: {} ", vm, kubernetesCluster, ex); } finally { CallContext.unregister(); } if (result) { if (logger.isInfoEnabled()) { - logger.info(String.format("Detached Kubernetes binaries from VM : %s in the Kubernetes cluster : %s", vm.getDisplayName(), kubernetesCluster.getName())); + logger.info("Detached Kubernetes binaries from VM: {} in the Kubernetes cluster: {}", vm, kubernetesCluster); } continue; } - logger.warn(String.format("Failed to detach binaries ISO from VM : %s in the Kubernetes cluster : %s ", vm.getDisplayName(), kubernetesCluster.getName())); + logger.warn("Failed to detach binaries ISO from VM: {} in the Kubernetes cluster: {} ", vm, kubernetesCluster); } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java index 50d7fb14085..fc80c300181 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java @@ -76,8 +76,8 @@ public class KubernetesClusterDestroyWorker extends KubernetesClusterResourceMod || kubernetesCluster.getState().equals(KubernetesCluster.State.Alert) || kubernetesCluster.getState().equals(KubernetesCluster.State.Error) || kubernetesCluster.getState().equals(KubernetesCluster.State.Destroying))) { - String msg = String.format("Cannot perform delete operation on cluster : %s in state: %s", - kubernetesCluster.getName(), kubernetesCluster.getState()); + String msg = String.format("Cannot perform delete operation on cluster %s in state: %s", + kubernetesCluster, 
kubernetesCluster.getState()); logger.warn(msg); throw new PermissionDeniedException(msg); } @@ -100,15 +100,16 @@ public class KubernetesClusterDestroyWorker extends KubernetesClusterResourceMod try { UserVm vm = userVmService.destroyVm(vmID, true); if (!userVmManager.expunge(userVM)) { - logger.warn(String.format("Unable to expunge VM %s : %s, destroying Kubernetes cluster will probably fail", - vm.getInstanceName() , vm.getUuid())); + logger.warn("Unable to expunge VM {}, destroying Kubernetes cluster will probably fail", vm); } kubernetesClusterVmMapDao.expunge(clusterVM.getId()); if (logger.isInfoEnabled()) { - logger.info(String.format("Destroyed VM : %s as part of Kubernetes cluster : %s cleanup", vm.getDisplayName(), kubernetesCluster.getName())); + logger.info("Destroyed VM {} as part of Kubernetes cluster : {} cleanup", vm, kubernetesCluster); } } catch (ResourceUnavailableException | ConcurrentOperationException e) { - logger.warn(String.format("Failed to destroy VM : %s part of the Kubernetes cluster : %s cleanup. Moving on with destroying remaining resources provisioned for the Kubernetes cluster", userVM.getDisplayName(), kubernetesCluster.getName()), e); + logger.warn("Failed to destroy VM {} part of the Kubernetes cluster {} " + + "cleanup. 
Moving on with destroying remaining resources provisioned " + + "for the Kubernetes cluster", userVM, kubernetesCluster, e); return false; } finally { CallContext.unregister(); @@ -132,13 +133,12 @@ public class KubernetesClusterDestroyWorker extends KubernetesClusterResourceMod ReservationContext context = new ReservationContextImpl(null, null, callerUser, owner); boolean networkDestroyed = networkMgr.destroyNetwork(kubernetesCluster.getNetworkId(), context, true); if (!networkDestroyed) { - String msg = String.format("Failed to destroy network : %s as part of Kubernetes cluster : %s cleanup", network.getName(), kubernetesCluster.getName()); + String msg = String.format("Failed to destroy network: %s as part of Kubernetes cluster: %s cleanup", network, kubernetesCluster); logger.warn(msg); throw new ManagementServerException(msg); } if (logger.isInfoEnabled()) { - logger.info(String.format("Destroyed network : %s as part of Kubernetes cluster : %s cleanup", - network.getName(), kubernetesCluster.getName())); + logger.info("Destroyed network: {} as part of Kubernetes cluster: {} cleanup", network, kubernetesCluster); } } } @@ -270,11 +270,11 @@ public class KubernetesClusterDestroyWorker extends KubernetesClusterResourceMod } } } else { - logger.error(String.format("Failed to find network for Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.error("Failed to find network for Kubernetes cluster : {}", kubernetesCluster); } } if (logger.isInfoEnabled()) { - logger.info(String.format("Destroying Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.info("Destroying Kubernetes cluster : {}", kubernetesCluster); } stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.DestroyRequested); boolean vmsDestroyed = destroyClusterVMs(); @@ -285,7 +285,7 @@ public class KubernetesClusterDestroyWorker extends KubernetesClusterResourceMod try { destroyKubernetesClusterNetwork(); } catch (ManagementServerException e) { - String msg = 
String.format("Failed to destroy network of Kubernetes cluster : %s cleanup", kubernetesCluster.getName()); + String msg = String.format("Failed to destroy network of Kubernetes cluster: %s cleanup", kubernetesCluster); logger.warn(msg, e); updateKubernetesClusterEntryForGC(); throw new CloudRuntimeException(msg, e); @@ -294,7 +294,7 @@ public class KubernetesClusterDestroyWorker extends KubernetesClusterResourceMod try { checkForRulesToDelete(); } catch (ManagementServerException e) { - String msg = String.format("Failed to remove network rules of Kubernetes cluster : %s", kubernetesCluster.getName()); + String msg = String.format("Failed to remove network rules of Kubernetes cluster: %s", kubernetesCluster); logger.warn(msg, e); updateKubernetesClusterEntryForGC(); throw new CloudRuntimeException(msg, e); @@ -302,14 +302,14 @@ public class KubernetesClusterDestroyWorker extends KubernetesClusterResourceMod try { releaseVpcTierPublicIpIfNeeded(); } catch (InsufficientAddressCapacityException e) { - String msg = String.format("Failed to release public IP for VPC tier used by Kubernetes cluster : %s", kubernetesCluster.getName()); + String msg = String.format("Failed to release public IP for VPC tier used by Kubernetes cluster: %s", kubernetesCluster); logger.warn(msg, e); updateKubernetesClusterEntryForGC(); throw new CloudRuntimeException(msg, e); } } } else { - String msg = String.format("Failed to destroy one or more VMs as part of Kubernetes cluster : %s cleanup", kubernetesCluster.getName()); + String msg = String.format("Failed to destroy one or more VMs as part of Kubernetes cluster: %s cleanup", kubernetesCluster); logger.warn(msg); updateKubernetesClusterEntryForGC(); throw new CloudRuntimeException(msg); @@ -319,12 +319,12 @@ public class KubernetesClusterDestroyWorker extends KubernetesClusterResourceMod kubernetesClusterDetailsDao.removeDetails(kubernetesCluster.getId()); boolean deleted = kubernetesClusterDao.remove(kubernetesCluster.getId()); if 
(!deleted) { - logMessage(Level.WARN, String.format("Failed to delete Kubernetes cluster : %s", kubernetesCluster.getName()), null); + logMessage(Level.WARN, String.format("Failed to delete Kubernetes cluster: %s", kubernetesCluster), null); updateKubernetesClusterEntryForGC(); return false; } if (logger.isInfoEnabled()) { - logger.info(String.format("Kubernetes cluster : %s is successfully deleted", kubernetesCluster.getName())); + logger.info("Kubernetes cluster: {} is successfully deleted", kubernetesCluster); } return true; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index 571c97eeb70..8c983149d02 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -265,7 +265,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu if (logger.isDebugEnabled()) { logger.debug(String.format("Checking host : %s for capacity already reserved %d", h.getName(), reserved)); } - if (capacityManager.checkIfHostHasCapacity(h.getId(), cpu_requested * reserved, ram_requested * reserved, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { + if (capacityManager.checkIfHostHasCapacity(h, cpu_requested * reserved, ram_requested * reserved, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { logger.debug("Found host {} with enough capacity: CPU={} RAM={}", h.getName(), cpu_requested * reserved, toHumanReadableSize(ram_requested * reserved)); hostEntry.setValue(new Pair(h, reserved)); suitable_host_found = true; @@ -274,19 
+274,19 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu } if (!suitable_host_found) { if (logger.isInfoEnabled()) { - logger.info(String.format("Suitable hosts not found in datacenter : %s for node %d, with offering : %s and hypervisor: %s", - zone.getName(), i, offering.getName(), clusterTemplate.getHypervisorType().toString())); + logger.info("Suitable hosts not found in datacenter: {} for node {}, with offering: {} and hypervisor: {}", + zone, i, offering, clusterTemplate.getHypervisorType().toString()); } break; } } if (suitable_host_found) { if (logger.isInfoEnabled()) { - logger.info(String.format("Suitable hosts found in datacenter : %s, creating deployment destination", zone.getName())); + logger.info("Suitable hosts found in datacenter: {}, creating deployment destination", zone); } return new DeployDestination(zone, null, null, null); } - String msg = String.format("Cannot find enough capacity for Kubernetes cluster(requested cpu=%d memory=%s) with offering : %s and hypervisor: %s", + String msg = String.format("Cannot find enough capacity for Kubernetes cluster(requested cpu=%d memory=%s) with offering: %s and hypervisor: %s", cpu_requested * nodesCount, toHumanReadableSize(ram_requested * nodesCount), offering.getName(), clusterTemplate.getHypervisorType().toString()); logger.warn(msg); @@ -297,7 +297,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu ServiceOffering offering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); if (logger.isDebugEnabled()) { - logger.debug(String.format("Checking deployment destination for Kubernetes cluster : %s in zone : %s", kubernetesCluster.getName(), zone.getName())); + logger.debug("Checking deployment destination for Kubernetes cluster: {} in zone: {}", kubernetesCluster, zone); } return plan(kubernetesCluster.getTotalNodeCount(), zone, offering); } 
@@ -362,7 +362,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu throw new ManagementServerException(String.format("Failed to provision worker VM for Kubernetes cluster : %s", kubernetesCluster.getName())); } nodes.add(vm); - logger.info("Provisioned node VM : {} in to the Kubernetes cluster : {}", vm.getDisplayName(), kubernetesCluster.getName()); + logger.info("Provisioned node VM: {} in to the Kubernetes cluster: {}", vm, kubernetesCluster); } finally { CallContext.unregister(); } @@ -420,7 +420,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu null, addrs, null, null, null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE, null); } if (logger.isInfoEnabled()) { - logger.info(String.format("Created node VM : %s, %s in the Kubernetes cluster : %s", hostName, nodeVm.getUuid(), kubernetesCluster.getName())); + logger.info("Created node VM : {}, {} in the Kubernetes cluster : {}", hostName, nodeVm, kubernetesCluster.getName()); } return nodeVm; } @@ -469,7 +469,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu }); rulesService.applyPortForwardingRules(publicIp.getId(), account); if (logger.isInfoEnabled()) { - logger.info(String.format("Provisioned SSH port forwarding rule: %s from port %d to %d on %s to the VM IP : %s in Kubernetes cluster : %s", pfRule.getUuid(), sourcePort, destPort, publicIp.getAddress().addr(), vmIp.toString(), kubernetesCluster.getName())); + logger.info("Provisioned SSH port forwarding rule: {} from port {} to {} on {} to the VM IP: {} in Kubernetes cluster: {}", pfRule, sourcePort, destPort, publicIp.getAddress().addr(), vmIp, kubernetesCluster); } } @@ -637,7 +637,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu int endPort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMIds.size() - 1; provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort); 
if (logger.isInfoEnabled()) { - logger.info(String.format("Provisioned firewall rule to open up port %d to %d on %s for Kubernetes cluster : %s", CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort, publicIp.getAddress().addr(), kubernetesCluster.getName())); + logger.info("Provisioned firewall rule to open up port {} to {} on {} for Kubernetes cluster: {}", CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort, publicIp.getAddress().addr(), kubernetesCluster); } } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) { throw new ManagementServerException(String.format("Failed to provision firewall rules for SSH access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); @@ -652,8 +652,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu try { provisionFirewallRules(publicIp, owner, CLUSTER_API_PORT, CLUSTER_API_PORT); if (logger.isInfoEnabled()) { - logger.info(String.format("Provisioned firewall rule to open up port %d on %s for Kubernetes cluster %s", - CLUSTER_API_PORT, publicIp.getAddress().addr(), kubernetesCluster.getName())); + logger.info("Provisioned firewall rule to open up port {} on {} for Kubernetes cluster {}", CLUSTER_API_PORT, publicIp.getAddress().addr(), kubernetesCluster); } } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) { throw new ManagementServerException(String.format("Failed to provision firewall rules for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); @@ -703,8 +702,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu try { provisionVpcTierAllowPortACLRule(network, CLUSTER_API_PORT, CLUSTER_API_PORT); if (logger.isInfoEnabled()) { - logger.info(String.format("Provisioned ACL rule to open up port %d on %s for Kubernetes cluster %s", - CLUSTER_API_PORT, publicIpAddress, kubernetesCluster.getName())); + 
logger.info("Provisioned ACL rule to open up port {} on {} for Kubernetes cluster {}", CLUSTER_API_PORT, publicIpAddress, kubernetesCluster); } } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | InvalidParameterValueException | PermissionDeniedException e) { throw new ManagementServerException(String.format("Failed to provision firewall rules for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); @@ -715,8 +713,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu try { provisionVpcTierAllowPortACLRule(network, DEFAULT_SSH_PORT, DEFAULT_SSH_PORT); if (logger.isInfoEnabled()) { - logger.info(String.format("Provisioned ACL rule to open up port %d on %s for Kubernetes cluster %s", - DEFAULT_SSH_PORT, publicIpAddress, kubernetesCluster.getName())); + logger.info("Provisioned ACL rule to open up port {} on {} for Kubernetes cluster {}", DEFAULT_SSH_PORT, publicIpAddress, kubernetesCluster); } } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | InvalidParameterValueException | PermissionDeniedException e) { throw new ManagementServerException(String.format("Failed to provision firewall rules for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); @@ -733,8 +730,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu try { removeVpcTierAllowPortACLRule(network, CLUSTER_API_PORT, CLUSTER_API_PORT); if (logger.isInfoEnabled()) { - logger.info(String.format("Removed network ACL rule to open up port %d on %s for Kubernetes cluster %s", - CLUSTER_API_PORT, publicIpAddress, kubernetesCluster.getName())); + logger.info("Removed network ACL rule to open up port {} on {} for Kubernetes cluster {}", CLUSTER_API_PORT, publicIpAddress, kubernetesCluster); } } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException e) { throw new 
ManagementServerException(String.format("Failed to remove network ACL rule for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); @@ -743,8 +739,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu try { removeVpcTierAllowPortACLRule(network, DEFAULT_SSH_PORT, DEFAULT_SSH_PORT); if (logger.isInfoEnabled()) { - logger.info(String.format("Removed network ACL rule to open up port %d on %s for Kubernetes cluster %s", - DEFAULT_SSH_PORT, publicIpAddress, kubernetesCluster.getName())); + logger.info("Removed network ACL rule to open up port {} on {} for Kubernetes cluster {}", DEFAULT_SSH_PORT, publicIpAddress, kubernetesCluster); } } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException e) { throw new ManagementServerException(String.format("Failed to remove network ACL rules for SSH access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java index aa500de9190..de85e6231f2 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java @@ -164,9 +164,9 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif */ private void scaleKubernetesClusterNetworkRules(final List clusterVMIds) throws ManagementServerException { if (manager.isDirectAccess(network)) { - if (logger.isDebugEnabled()) { - logger.debug(String.format("Network : %s for Kubernetes cluster : %s is not an isolated network or ROUTED network, therefore, no need for network rules", 
network.getName(), kubernetesCluster.getName())); - } + if (logger.isDebugEnabled()) + logger.debug("Network: {} for Kubernetes cluster: {} is not an isolated network " + + "or ROUTED network, therefore, no need for network rules", network, kubernetesCluster); return; } if (network.getVpcId() != null) { @@ -206,7 +206,7 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif pkFile, null, String.format("sudo /opt/bin/kubectl drain %s --ignore-daemonsets --delete-local-data", hostName), 10000, 10000, 60000); if (!result.first()) { - logger.warn(String.format("Draining node: %s on VM : %s in Kubernetes cluster : %s unsuccessful", hostName, userVm.getDisplayName(), kubernetesCluster.getName())); + logger.warn("Draining node: {} on VM: {} in Kubernetes cluster: {} unsuccessful", hostName, userVm, kubernetesCluster); } else { result = SshHelper.sshExecute(ipAddress, port, getControlNodeLoginUser(), pkFile, null, String.format("sudo /opt/bin/kubectl delete node %s", hostName), @@ -214,18 +214,18 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif if (result.first()) { return true; } else { - logger.warn(String.format("Deleting node: %s on VM : %s in Kubernetes cluster : %s unsuccessful", hostName, userVm.getDisplayName(), kubernetesCluster.getName())); + logger.warn("Deleting node: {} on VM: {} in Kubernetes cluster: {} unsuccessful", hostName, userVm, kubernetesCluster); } } break; } catch (Exception e) { - String msg = String.format("Failed to remove Kubernetes cluster : %s node: %s on VM : %s", kubernetesCluster.getName(), hostName, userVm.getDisplayName()); + String msg = String.format("Failed to remove Kubernetes cluster: %s node: %s on VM: %s", kubernetesCluster, hostName, userVm); logger.warn(msg, e); } try { Thread.sleep(waitDuration); } catch (InterruptedException ie) { - logger.error(String.format("Error while waiting for Kubernetes cluster : %s node: %s on VM : %s removal", 
kubernetesCluster.getName(), hostName, userVm.getDisplayName()), ie); + logger.error("Error while waiting for Kubernetes cluster: {} node: {} on VM: {} removal", kubernetesCluster, hostName, userVm, ie); } retryCounter++; } @@ -316,9 +316,11 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif private void removeNodesFromCluster(List vmMaps) throws CloudRuntimeException { for (KubernetesClusterVmMapVO vmMapVO : vmMaps) { UserVmVO userVM = userVmDao.findById(vmMapVO.getVmId()); - logger.info(String.format("Removing vm : %s from cluster %s", userVM.getDisplayName(), kubernetesCluster.getName())); + logger.info("Removing vm {} from cluster {}", userVM, kubernetesCluster); if (!removeKubernetesClusterNode(publicIpAddress, sshPort, userVM, 3, 30000)) { - logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, failed to remove Kubernetes node: %s running on VM : %s", kubernetesCluster.getName(), userVM.getHostName(), userVM.getDisplayName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes" + + " cluster %s, failed to remove Kubernetes node: %s running on VM : %s", + kubernetesCluster, userVM.getHostName(), userVM), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } CallContext vmContext = CallContext.register(CallContext.current(), ApiCommandResourceType.VirtualMachine); @@ -327,17 +329,18 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif UserVm vm = userVmService.destroyVm(userVM.getId(), true); if (!userVmManager.expunge(userVM)) { logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, unable to expunge VM '%s'." 
- , kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + , kubernetesCluster, vm), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } } catch (ResourceUnavailableException e) { logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, unable to remove VM ID: %s", - kubernetesCluster.getName() , userVM.getDisplayName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e); + kubernetesCluster, userVM), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e); } finally { CallContext.unregister(); } kubernetesClusterVmMapDao.expunge(vmMapVO.getId()); if (System.currentTimeMillis() > scaleTimeoutTime) { - logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster %s failed, scaling action timed out", kubernetesCluster.getName()),kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster %s failed, scaling action timed out", + kubernetesCluster), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } } @@ -346,7 +349,9 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif List clusterVMIds = getKubernetesClusterVMMaps().stream().map(KubernetesClusterVmMapVO::getVmId).collect(Collectors.toList()); scaleKubernetesClusterNetworkRules(clusterVMIds); } catch (ManagementServerException e) { - logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, unable to update network rules", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e); + logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes " + + "cluster %s, unable to update network rules", kubernetesCluster), + kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e); } } @@ -437,13 +442,13 @@ 
public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif public boolean scaleCluster() throws CloudRuntimeException { init(); if (logger.isInfoEnabled()) { - logger.info(String.format("Scaling Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.info("Scaling Kubernetes cluster {}", kubernetesCluster); } scaleTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterScaleTimeout.value() * 1000; final long originalClusterSize = kubernetesCluster.getNodeCount(); final ServiceOffering existingServiceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); if (existingServiceOffering == null) { - logAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster : %s failed, service offering for the Kubernetes cluster not found!", kubernetesCluster.getName())); + logAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, service offering for the Kubernetes cluster not found!", kubernetesCluster)); } final boolean autoscalingChanged = isAutoscalingChanged(); final boolean serviceOfferingScalingNeeded = serviceOffering != null && serviceOffering.getId() != existingServiceOffering.getId(); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index 028ea1c7992..a2384a2e0fe 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -232,7 +232,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif requestedIps, addrs, null, null, null, customParameterMap, null, null, null, null, true, 
UserVmManager.CKS_NODE, null); } if (logger.isInfoEnabled()) { - logger.info(String.format("Created control VM ID: %s, %s in the Kubernetes cluster : %s", controlVm.getUuid(), hostName, kubernetesCluster.getName())); + logger.info("Created control VM: {}, {} in the Kubernetes cluster: {}", controlVm, hostName, kubernetesCluster); } return controlVm; } @@ -310,7 +310,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif } if (logger.isInfoEnabled()) { - logger.info(String.format("Created control VM ID : %s, %s in the Kubernetes cluster : %s", additionalControlVm.getUuid(), hostName, kubernetesCluster.getName())); + logger.info("Created control VM: {}, {} in the Kubernetes cluster: {}", additionalControlVm, hostName, kubernetesCluster); } return additionalControlVm; } @@ -329,7 +329,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif throw new ManagementServerException(String.format("Failed to provision control VM for Kubernetes cluster : %s" , kubernetesCluster.getName())); } if (logger.isInfoEnabled()) { - logger.info(String.format("Provisioned the control VM : %s in to the Kubernetes cluster : %s", k8sControlVM.getDisplayName(), kubernetesCluster.getName())); + logger.info("Provisioned the control VM: {} in to the Kubernetes cluster: {}", k8sControlVM, kubernetesCluster); } return k8sControlVM; } @@ -352,7 +352,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif } additionalControlVms.add(vm); if (logger.isInfoEnabled()) { - logger.info(String.format("Provisioned additional control VM : %s in to the Kubernetes cluster : %s", vm.getDisplayName(), kubernetesCluster.getName())); + logger.info("Provisioned additional control VM: {} in to the Kubernetes cluster: {}", vm, kubernetesCluster); } } } @@ -371,10 +371,10 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif try { networkMgr.startNetwork(network.getId(), destination, context); if 
(logger.isInfoEnabled()) { - logger.info(String.format("Network : %s is started for the Kubernetes cluster : %s", network.getName(), kubernetesCluster.getName())); + logger.info("Network: {} is started for the Kubernetes cluster: {}", network, kubernetesCluster); } } catch (ConcurrentOperationException | ResourceUnavailableException |InsufficientCapacityException e) { - String msg = String.format("Failed to start Kubernetes cluster : %s as unable to start associated network : %s" , kubernetesCluster.getName(), network.getName()); + String msg = String.format("Failed to start Kubernetes cluster: %s as unable to start associated network: %s" , kubernetesCluster, network); logger.error(msg, e); stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed); throw new ManagementServerException(msg, e); @@ -385,7 +385,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif protected void setupKubernetesClusterNetworkRules(Network network, List clusterVMs) throws ManagementServerException { if (manager.isDirectAccess(network)) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Network : %s for Kubernetes cluster : %s is not an isolated network or ROUTED network, therefore, no need for network rules", network.getName(), kubernetesCluster.getName())); + logger.debug("Network: {} for Kubernetes cluster: {} is not an isolated network or ROUTED network, therefore, no need for network rules", network, kubernetesCluster); } return; } @@ -416,7 +416,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif resizeNodeVolume(vm); startKubernetesVM(vm); } catch (ManagementServerException ex) { - logger.warn(String.format("Failed to start VM : %s in Kubernetes cluster : %s due to ", vm.getDisplayName(), kubernetesCluster.getName()) + ex); + logger.warn("Failed to start VM: {} in Kubernetes cluster: {} due to {}", vm, kubernetesCluster, ex); // don't bail out here. 
proceed further to stop the reset of the VM's } } @@ -471,7 +471,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif public boolean startKubernetesClusterOnCreate() { init(); if (logger.isInfoEnabled()) { - logger.info(String.format("Starting Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.info("Starting Kubernetes cluster: {}", kubernetesCluster); } final long startTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterStartTimeout.value() * 1000; stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.StartRequested); @@ -530,7 +530,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif logTransitStateAndThrow(Level.ERROR, String.format("Provisioning node VM failed in the Kubernetes cluster : %s", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); } if (logger.isInfoEnabled()) { - logger.info(String.format("Kubernetes cluster : %s VMs successfully provisioned", kubernetesCluster.getName())); + logger.info("Kubernetes cluster: {} VMs successfully provisioned", kubernetesCluster); } try { setupKubernetesClusterNetworkRules(network, clusterVMs); @@ -577,7 +577,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif public boolean startStoppedKubernetesCluster() throws CloudRuntimeException { init(); if (logger.isInfoEnabled()) { - logger.info(String.format("Starting Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.info("Starting Kubernetes cluster: {}", kubernetesCluster); } final long startTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterStartTimeout.value() * 1000; stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.StartRequested); @@ -604,7 +604,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif } stateTransitTo(kubernetesCluster.getId(), 
KubernetesCluster.Event.OperationSucceeded); if (logger.isInfoEnabled()) { - logger.info(String.format("Kubernetes cluster : %s successfully started", kubernetesCluster.getName())); + logger.info("Kubernetes cluster: {} successfully started", kubernetesCluster); } return true; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStopWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStopWorker.java index 60802d12e72..59d74751dff 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStopWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStopWorker.java @@ -38,7 +38,7 @@ public class KubernetesClusterStopWorker extends KubernetesClusterActionWorker { public boolean stop() throws CloudRuntimeException { init(); if (logger.isInfoEnabled()) { - logger.info(String.format("Stopping Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.info("Stopping Kubernetes cluster: {}", kubernetesCluster); } stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.StopRequested); List clusterVMs = getKubernetesClusterVMs(); @@ -51,8 +51,7 @@ public class KubernetesClusterStopWorker extends KubernetesClusterActionWorker { try { userVmService.stopVirtualMachine(vm.getId(), false); } catch (ConcurrentOperationException ex) { - logger.warn(String.format("Failed to stop VM : %s in Kubernetes cluster : %s", - vm.getDisplayName(), kubernetesCluster.getName()), ex); + logger.warn("Failed to stop VM: {} in Kubernetes cluster: {}", vm, kubernetesCluster, ex); } finally { CallContext.unregister(); } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java 
b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java index 4fefa54a6d9..ab3121f207b 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java @@ -85,8 +85,7 @@ public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorke } Pair result; if (logger.isInfoEnabled()) { - logger.info(String.format("Upgrading node on VM %s in Kubernetes cluster %s with Kubernetes version(%s) ID: %s", - vm.getDisplayName(), kubernetesCluster.getName(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid())); + logger.info("Upgrading node on VM {} in Kubernetes cluster {} with Kubernetes version {}", vm, kubernetesCluster, upgradeVersion); } String errorMessage = String.format("Failed to upgrade Kubernetes cluster : %s, unable to drain Kubernetes node on VM : %s", kubernetesCluster.getName(), vm.getDisplayName()); for (int retry = KubernetesClusterService.KubernetesClusterUpgradeRetries.value(); retry >= 0; retry--) { @@ -149,8 +148,7 @@ public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorke logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to get Kubernetes node on VM : %s upgraded to version %s", kubernetesCluster.getName(), vm.getDisplayName(), upgradeVersion.getSemanticVersion()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); } if (logger.isInfoEnabled()) { - logger.info(String.format("Successfully upgraded node on VM %s in Kubernetes cluster %s with Kubernetes version(%s) ID: %s", - vm.getDisplayName(), kubernetesCluster.getName(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid())); + logger.info("Successfully upgraded node 
on VM {} in Kubernetes cluster {} with Kubernetes version {}", vm, kubernetesCluster, upgradeVersion); } } } @@ -158,18 +156,18 @@ public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorke public boolean upgradeCluster() throws CloudRuntimeException { init(); if (logger.isInfoEnabled()) { - logger.info(String.format("Upgrading Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.info("Upgrading Kubernetes cluster: {}", kubernetesCluster); } upgradeTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterUpgradeTimeout.value() * 1000; Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); publicIpAddress = publicIpSshPort.first(); sshPort = publicIpSshPort.second(); if (StringUtils.isEmpty(publicIpAddress)) { - logAndThrow(Level.ERROR, String.format("Upgrade failed for Kubernetes cluster : %s, unable to retrieve associated public IP", kubernetesCluster.getName())); + logAndThrow(Level.ERROR, String.format("Upgrade failed for Kubernetes cluster: %s, unable to retrieve associated public IP", kubernetesCluster)); } clusterVMs = getKubernetesClusterVMs(); if (CollectionUtils.isEmpty(clusterVMs)) { - logAndThrow(Level.ERROR, String.format("Upgrade failed for Kubernetes cluster : %s, unable to retrieve VMs for cluster", kubernetesCluster.getName())); + logAndThrow(Level.ERROR, String.format("Upgrade failed for Kubernetes cluster: %s, unable to retrieve VMs for cluster", kubernetesCluster)); } retrieveScriptFiles(); stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.UpgradeRequested); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java index 7a3268014fd..74e8b0c9b23 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java 
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java @@ -59,7 +59,7 @@ public class KubernetesClusterUtil { return true; } if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Failed to retrieve status for node: %s in Kubernetes cluster : %s. Output: %s", nodeName, kubernetesCluster.getName(), result.second())); + LOGGER.debug(String.format("Failed to retrieve status for node: %s in Kubernetes cluster: %s. Output: %s", nodeName, kubernetesCluster, result.second())); } return false; } @@ -72,7 +72,7 @@ public class KubernetesClusterUtil { try { ready = isKubernetesClusterNodeReady(kubernetesCluster, ipAddress, port, user, sshKeyFile, nodeName); } catch (Exception e) { - LOGGER.warn(String.format("Failed to retrieve state of node: %s in Kubernetes cluster : %s", nodeName, kubernetesCluster.getName()), e); + LOGGER.warn(String.format("Failed to retrieve state of node: %s in Kubernetes cluster: %s", nodeName, kubernetesCluster), e); } if (ready) { return true; @@ -80,7 +80,7 @@ public class KubernetesClusterUtil { try { Thread.sleep(waitDuration); } catch (InterruptedException ie) { - LOGGER.error(String.format("Error while waiting for Kubernetes cluster : %s node: %s to become ready", kubernetesCluster.getName(), nodeName), ie); + LOGGER.error(String.format("Error while waiting for Kubernetes cluster: %s node: %s to become ready", kubernetesCluster, nodeName), ie); } } return false; @@ -120,14 +120,14 @@ public class KubernetesClusterUtil { return true; } } catch (Exception e) { - LOGGER.warn(String.format("Failed to uncordon node: %s on VM ID : %s in Kubernetes cluster : %s", - hostName, userVm.getUuid(), kubernetesCluster.getName()), e); + LOGGER.warn(String.format("Failed to uncordon node: %s on VM %s in Kubernetes cluster %s", + hostName, userVm, kubernetesCluster), e); } try { Thread.sleep(waitDuration); } catch (InterruptedException ie) { - LOGGER.warn(String.format("Error while waiting 
for uncordon Kubernetes cluster : %s node: %s on VM : %s", - kubernetesCluster.getName(), hostName, userVm.getUuid()), ie); + LOGGER.warn(String.format("Error while waiting for uncordon Kubernetes cluster %s node: %s on VM %s", + kubernetesCluster, hostName, userVm), ie); } } return false; @@ -150,14 +150,14 @@ public class KubernetesClusterUtil { lines) { if (line.contains(serviceName) && line.contains("Running")) { if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Service : %s in namespace: %s for the Kubernetes cluster : %s is running", serviceName, namespace, kubernetesCluster.getName())); + LOGGER.debug(String.format("Service %s in namespace %s for the Kubernetes cluster %s is running", serviceName, namespace, kubernetesCluster)); } return true; } } } } catch (Exception e) { - LOGGER.warn(String.format("Unable to retrieve service: %s running status in namespace %s for Kubernetes cluster : %s", serviceName, namespace, kubernetesCluster.getName()), e); + LOGGER.warn(String.format("Unable to retrieve service: %s running status in namespace %s for Kubernetes cluster %s", serviceName, namespace, kubernetesCluster), e); } return false; } @@ -169,11 +169,11 @@ public class KubernetesClusterUtil { // Check if dashboard service is up running. 
while (System.currentTimeMillis() < timeoutTime) { if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Checking dashboard service for the Kubernetes cluster : %s to come up", kubernetesCluster.getName())); + LOGGER.debug(String.format("Checking dashboard service for the Kubernetes cluster: %s to come up", kubernetesCluster)); } if (isKubernetesClusterAddOnServiceRunning(kubernetesCluster, ipAddress, port, user, sshKeyFile, "kubernetes-dashboard", "kubernetes-dashboard")) { if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Dashboard service for the Kubernetes cluster : %s is in running state", kubernetesCluster.getName())); + LOGGER.info(String.format("Dashboard service for the Kubernetes cluster %s is in running state", kubernetesCluster)); } running = true; break; @@ -181,7 +181,7 @@ public class KubernetesClusterUtil { try { Thread.sleep(waitDuration); } catch (InterruptedException ex) { - LOGGER.error(String.format("Error while waiting for Kubernetes cluster: %s API dashboard service to be available", kubernetesCluster.getName()), ex); + LOGGER.error(String.format("Error while waiting for Kubernetes cluster %s API dashboard service to be available", kubernetesCluster), ex); } } return running; @@ -201,11 +201,11 @@ public class KubernetesClusterUtil { break; } else { if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Failed to retrieve kube-config file for Kubernetes cluster : %s. Output: %s", kubernetesCluster.getName(), result.second())); + LOGGER.info(String.format("Failed to retrieve kube-config file for Kubernetes cluster: %s. 
Output: %s", kubernetesCluster, result.second())); } } } catch (Exception e) { - LOGGER.warn(String.format("Failed to retrieve kube-config file for Kubernetes cluster : %s", kubernetesCluster.getName()), e); + LOGGER.warn(String.format("Failed to retrieve kube-config file for Kubernetes cluster %s", kubernetesCluster), e); } } return kubeConfig; @@ -221,7 +221,7 @@ public class KubernetesClusterUtil { return Integer.parseInt(result.second().trim().replace("\"", "")); } else { if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Failed to retrieve ready nodes for Kubernetes cluster : %s. Output: %s", kubernetesCluster.getName(), result.second())); + LOGGER.debug(String.format("Failed to retrieve ready nodes for Kubernetes cluster %s. Output: %s", kubernetesCluster, result.second())); } } return 0; @@ -241,18 +241,18 @@ public class KubernetesClusterUtil { String versionOutput = br.lines().collect(Collectors.joining()); if (StringUtils.isNotEmpty(versionOutput)) { if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Kubernetes cluster : %s API has been successfully provisioned, %s", kubernetesCluster.getName(), versionOutput)); + LOGGER.info(String.format("Kubernetes cluster %s API has been successfully provisioned, %s", kubernetesCluster, versionOutput)); } k8sApiServerSetup = true; break; } } catch (Exception e) { - LOGGER.warn(String.format("API endpoint for Kubernetes cluster : %s not available", kubernetesCluster.getName()), e); + LOGGER.warn(String.format("API endpoint for Kubernetes cluster %s not available", kubernetesCluster), e); } try { Thread.sleep(waitDuration); } catch (InterruptedException ie) { - LOGGER.error(String.format("Error while waiting for Kubernetes cluster : %s API endpoint to be available", kubernetesCluster.getName()), ie); + LOGGER.error(String.format("Error while waiting for Kubernetes cluster %s API endpoint to be available", kubernetesCluster), ie); } } return k8sApiServerSetup; @@ -266,11 +266,11 @@ public class 
KubernetesClusterUtil { socket.connect(new InetSocketAddress(ipAddress, port), 10000); controlVmRunning = true; } catch (IOException e) { - LOGGER.info("Waiting for Kubernetes cluster : {} control node VMs to be accessible", kubernetesCluster.getName()); + LOGGER.info("Waiting for Kubernetes cluster {} control node VMs to be accessible", kubernetesCluster); try { Thread.sleep(10000); } catch (InterruptedException ex) { - LOGGER.warn("Error while waiting for Kubernetes cluster : {} control node VMs to be accessible", kubernetesCluster.getName(), ex); + LOGGER.warn("Error while waiting for Kubernetes cluster {} control node VMs to be accessible", kubernetesCluster, ex); } } } @@ -283,28 +283,28 @@ public class KubernetesClusterUtil { final long timeoutTime, final long waitDuration) { while (System.currentTimeMillis() < timeoutTime) { if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Checking ready nodes for the Kubernetes cluster : %s with total %d provisioned nodes", kubernetesCluster.getName(), kubernetesCluster.getTotalNodeCount())); + LOGGER.debug(String.format("Checking ready nodes for the Kubernetes cluster %s with total %d provisioned nodes", kubernetesCluster, kubernetesCluster.getTotalNodeCount())); } try { int nodesCount = KubernetesClusterUtil.getKubernetesClusterReadyNodesCount(kubernetesCluster, ipAddress, port, user, sshKeyFile); if (nodesCount == kubernetesCluster.getTotalNodeCount()) { if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Kubernetes cluster : %s has %d ready nodes now", kubernetesCluster.getName(), kubernetesCluster.getTotalNodeCount())); + LOGGER.info(String.format("Kubernetes cluster %s has %d ready nodes now", kubernetesCluster, kubernetesCluster.getTotalNodeCount())); } return true; } else { if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Kubernetes cluster : %s has total %d provisioned nodes while %d ready now", kubernetesCluster.getName(), kubernetesCluster.getTotalNodeCount(), nodesCount)); + 
LOGGER.debug(String.format("Kubernetes cluster %s has total %d provisioned nodes while %d ready now", kubernetesCluster, kubernetesCluster.getTotalNodeCount(), nodesCount)); } } } catch (Exception e) { - LOGGER.warn(String.format("Failed to retrieve ready node count for Kubernetes cluster : %s", kubernetesCluster.getName()), e); + LOGGER.warn(String.format("Failed to retrieve ready node count for Kubernetes cluster %s", kubernetesCluster), e); } try { Thread.sleep(waitDuration); } catch (InterruptedException ex) { - LOGGER.warn(String.format("Error while waiting during Kubernetes cluster : %s ready node check", kubernetesCluster.getName()), ex); + LOGGER.warn(String.format("Error while waiting during Kubernetes cluster %s ready node check", kubernetesCluster), ex); } } return false; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesSupportedVersionVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesSupportedVersionVO.java index 455561b4020..ea8fe3ca7fd 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesSupportedVersionVO.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesSupportedVersionVO.java @@ -30,6 +30,7 @@ import javax.persistence.Id; import javax.persistence.Table; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "kubernetes_supported_version") @@ -85,6 +86,13 @@ public class KubernetesSupportedVersionVO implements KubernetesSupportedVersion this.minimumRamSize = minimumRamSize; } + @Override + public String toString() { + return String.format("KubernetesSupportedVersion %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "semanticVersion")); + } + @Override public long getId() { return id; diff 
--git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java index 93e1ae2810a..86247b53d32 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java @@ -152,7 +152,7 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne versions.remove(i); } } catch (IllegalArgumentException e) { - logger.warn(String.format("Unable to compare Kubernetes version for supported version ID: %s with %s", version.getUuid(), minimumSemanticVersion)); + logger.warn("Unable to compare Kubernetes version for supported version {} with {}", version, minimumSemanticVersion); versions.remove(i); } } @@ -383,13 +383,13 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne VMTemplateVO template = templateDao.findByIdIncludingRemoved(version.getIsoId()); if (template == null) { - logger.warn(String.format("Unable to find ISO associated with supported Kubernetes version ID: %s", version.getUuid())); + logger.warn("Unable to find ISO associated with supported Kubernetes version {}", version); } if (template != null && template.getRemoved() == null) { // Delete ISO try { deleteKubernetesVersionIso(template.getId()); } catch (IllegalAccessException | NoSuchFieldException | IllegalArgumentException ex) { - logger.error(String.format("Unable to delete binaries ISO ID: %s associated with supported kubernetes version ID: %s", template.getUuid(), version.getUuid()), ex); + logger.error("Unable to delete binaries ISO: {} associated with supported kubernetes version: {}", template, version, ex); throw new CloudRuntimeException(String.format("Unable to delete binaries ISO ID: %s 
associated with supported kubernetes version ID: %s", template.getUuid(), version.getUuid())); } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/DeleteKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/DeleteKubernetesClusterCmd.java index 2ce8151c063..ca7eb985be0 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/DeleteKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/DeleteKubernetesClusterCmd.java @@ -93,7 +93,8 @@ public class DeleteKubernetesClusterCmd extends BaseAsyncCmd { public void execute() throws ServerApiException, ConcurrentOperationException { try { if (!kubernetesClusterService.deleteKubernetesCluster(this)) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to delete Kubernetes cluster ID: %d", getId())); + KubernetesCluster cluster = kubernetesClusterService.findById(getId()); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to delete Kubernetes cluster %s with id: %d", cluster, getId())); } SuccessResponse response = new SuccessResponse(getCommandName()); setResponseObject(response); diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java index ee3566564e4..59c2bebf961 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java +++ 
b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java @@ -158,7 +158,8 @@ public class ScaleKubernetesClusterCmd extends BaseAsyncCmd { public void execute() throws ServerApiException, ConcurrentOperationException { try { if (!kubernetesClusterService.scaleKubernetesCluster(this)) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to scale Kubernetes cluster ID: %d", getId())); + KubernetesCluster cluster = kubernetesClusterService.findById(getId()); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to scale Kubernetes cluster %s with id %d", cluster, getId())); } final KubernetesClusterResponse response = kubernetesClusterService.createKubernetesClusterResponse(getId()); response.setResponseName(getCommandName()); diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StopKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StopKubernetesClusterCmd.java index 23d6878cf72..645e45a4c5f 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StopKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StopKubernetesClusterCmd.java @@ -100,7 +100,8 @@ public class StopKubernetesClusterCmd extends BaseAsyncCmd { public void execute() throws ServerApiException, ConcurrentOperationException { try { if (!kubernetesClusterService.stopKubernetesCluster(this)) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to start Kubernetes cluster ID: %d", getId())); + KubernetesCluster cluster = kubernetesClusterService.findById(getId()); + throw new 
ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to stop Kubernetes cluster %s with id %d", cluster, getId())); } final SuccessResponse response = new SuccessResponse(getCommandName()); setResponseObject(response); diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/UpgradeKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/UpgradeKubernetesClusterCmd.java index a3f2e057645..04a2075c50d 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/UpgradeKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/UpgradeKubernetesClusterCmd.java @@ -110,7 +110,8 @@ public class UpgradeKubernetesClusterCmd extends BaseAsyncCmd { public void execute() throws ServerApiException, ConcurrentOperationException { try { if (!kubernetesClusterService.upgradeKubernetesCluster(this)) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %d", getId())); + KubernetesCluster cluster = kubernetesClusterService.findById(getId()); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to upgrade Kubernetes cluster %s with id %d", cluster, getId())); } final KubernetesClusterResponse response = kubernetesClusterService.createKubernetesClusterResponse(getId()); response.setResponseName(getCommandName()); diff --git a/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java b/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java index 6025a41d69c..51524c12912 100644 --- a/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java +++ 
b/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java @@ -851,7 +851,7 @@ public class MetricsServiceImpl extends MutualExclusiveIdsManagerBase implements } for (final ManagementServerResponse managementServerResponse: managementServerResponses) { if(logger.isDebugEnabled()) { - logger.debug(String.format("Processing metrics for MS hosts %s.", managementServerResponse.getId())); + logger.debug("Processing metrics for MS host [id: {}, name: {}].", managementServerResponse.getId(), managementServerResponse.getName()); } ManagementServerMetricsResponse metricsResponse = new ManagementServerMetricsResponse(); diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/BigSwitchBcfDeviceVO.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/BigSwitchBcfDeviceVO.java index 1338eebdb73..7fec1e2c093 100644 --- a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/BigSwitchBcfDeviceVO.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/BigSwitchBcfDeviceVO.java @@ -29,6 +29,7 @@ import javax.persistence.Id; import javax.persistence.Table; import org.apache.cloudstack.api.InternalIdentity; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "external_bigswitch_bcf_devices") @@ -90,6 +91,13 @@ public class BigSwitchBcfDeviceVO implements InternalIdentity { this.hash = hash; } + @Override + public String toString() { + return String.format("BigSwitchBcfDevice %s.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } + @Override public long getId() { return id; diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/element/BigSwitchBcfElement.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/element/BigSwitchBcfElement.java index 5fc9480c610..adb957c3b5e 100644 --- 
a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/element/BigSwitchBcfElement.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/element/BigSwitchBcfElement.java @@ -192,18 +192,18 @@ NetworkACLServiceProvider, FirewallServiceProvider, ResourceStateAdapter { } private boolean canHandle(Network network, Service service) { - logger.debug("Checking if BigSwitchBcfElement can handle service " + service.getName() + " on network " + network.getDisplayText()); + logger.debug(String.format("Checking if BigSwitchBcfElement can handle service %s on network %s", service.getName(), network)); if (network.getBroadcastDomainType() != BroadcastDomainType.Vlan) { return false; } if (!_networkModel.isProviderForNetwork(getProvider(), network.getId())) { - logger.debug("BigSwitchBcfElement is not a provider for network " + network.getDisplayText()); + logger.debug(String.format("BigSwitchBcfElement is not a provider for network %s", network)); return false; } if (!_ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), service, BcfConstants.BIG_SWITCH_BCF)) { - logger.debug("BigSwitchBcfElement can't provide the " + service.getName() + " service on network " + network.getDisplayText()); + logger.debug(String.format("BigSwitchBcfElement can't provide the %s service on network %s", service.getName(), network)); return false; } @@ -421,11 +421,11 @@ NetworkACLServiceProvider, FirewallServiceProvider, ResourceStateAdapter { final PhysicalNetworkServiceProviderVO ntwkSvcProvider = _physicalNetworkServiceProviderDao.findByServiceProvider(physicalNetwork.getId(), networkDevice.getNetworkServiceProvder()); if (ntwkSvcProvider == null) { - throw new CloudRuntimeException("Network Service Provider: " + networkDevice.getNetworkServiceProvder() + " is not enabled in the physical network: " + - physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is not enabled in the 
physical network: %s to add this device", + networkDevice.getNetworkServiceProvder(), physicalNetwork)); } else if (ntwkSvcProvider.getState() == PhysicalNetworkServiceProvider.State.Shutdown) { - throw new CloudRuntimeException("Network Service Provider: " + ntwkSvcProvider.getProviderName() + " is in shutdown state in the physical network: " + - physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is in shutdown state in the physical network: %s to add this device", + ntwkSvcProvider.getProviderName(), physicalNetwork)); } ntwkSvcProvider.setFirewallServiceProvided(true); ntwkSvcProvider.setGatewayServiceProvided(true); @@ -527,7 +527,7 @@ NetworkACLServiceProvider, FirewallServiceProvider, ResourceStateAdapter { Long bigswitchBcfDeviceId = cmd.getBigSwitchBcfDeviceId(); BigSwitchBcfDeviceVO bigswitchBcfDevice = _bigswitchBcfDao.findById(bigswitchBcfDeviceId); if (bigswitchBcfDevice == null) { - throw new InvalidParameterValueException("Could not find a BigSwitch Controller with id " + bigswitchBcfDevice); + throw new InvalidParameterValueException(String.format("Could not find a BigSwitch Controller with id %d", bigswitchBcfDeviceId)); } HostVO bigswitchHost = _hostDao.findById(bigswitchBcfDevice.getHostId()); @@ -554,7 +554,7 @@ NetworkACLServiceProvider, FirewallServiceProvider, ResourceStateAdapter { if (bigswitchBcfDeviceId != null) { BigSwitchBcfDeviceVO bigswitchBcfDevice = _bigswitchBcfDao.findById(bigswitchBcfDeviceId); if (bigswitchBcfDevice == null) { - throw new InvalidParameterValueException("Could not find BigSwitch controller with id: " + bigswitchBcfDevice); + throw new InvalidParameterValueException(String.format("Could not find BigSwitch controller with id: %d", bigswitchBcfDeviceId)); } responseList.add(bigswitchBcfDevice); } else { diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/guru/BigSwitchBcfGuestNetworkGuru.java 
b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/guru/BigSwitchBcfGuestNetworkGuru.java index f9c11e50748..cefae2dcadd 100644 --- a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/guru/BigSwitchBcfGuestNetworkGuru.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/guru/BigSwitchBcfGuestNetworkGuru.java @@ -153,12 +153,11 @@ public class BigSwitchBcfGuestNetworkGuru extends GuestNetworkGuru implements Ne List devices = _bigswitchBcfDao.listByPhysicalNetwork(physnet.getId()); if (devices.isEmpty()) { - logger.error("No BigSwitch Controller on physical network " + physnet.getName()); + logger.error(String.format("No BigSwitch Controller on physical network %s", physnet)); return null; } for (BigSwitchBcfDeviceVO d: devices){ - logger.debug("BigSwitch Controller " + d.getUuid() - + " found on physical network " + physnet.getId()); + logger.debug(String.format("BigSwitch Controller %s found on physical network %s", d, physnet)); } logger.debug("Physical isolation type is BCF_SEGMENT, asking GuestNetworkGuru to design this network"); @@ -309,7 +308,7 @@ public class BigSwitchBcfGuestNetworkGuru extends GuestNetworkGuru implements Ne public void shutdown(NetworkProfile profile, NetworkOffering offering) { NetworkVO networkObject = _networkDao.findById(profile.getId()); if (networkObject.getBroadcastDomainType() != BroadcastDomainType.Vlan || networkObject.getBroadcastUri() == null) { - logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); + logger.warn(String.format("BroadcastUri is empty or incorrect for guest network %s", networkObject)); return; } @@ -353,8 +352,7 @@ public class BigSwitchBcfGuestNetworkGuru extends GuestNetworkGuru implements Ne tenantId = vpc.getUuid(); tenantName = vpc.getName(); boolean released = _vpcDao.releaseFromLockTable(vpc.getId()); - logger.debug("BCF guru release lock vpc id: " + vpc.getId() - + " released? 
" + released); + logger.debug(String.format("BCF guru release lock vpc: %s released? %s", vpc, released)); } else { // use network id in CS as tenant in BSN // use network uuid as tenant id for non-VPC networks diff --git a/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/element/BrocadeVcsElement.java b/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/element/BrocadeVcsElement.java index daf9c1c4e08..326ea494860 100644 --- a/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/element/BrocadeVcsElement.java +++ b/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/element/BrocadeVcsElement.java @@ -136,18 +136,18 @@ public class BrocadeVcsElement extends AdapterBase implements NetworkElement, Re } protected boolean canHandle(Network network, Service service) { - logger.debug("Checking if BrocadeVcsElement can handle service " + service.getName() + " on network " + network.getDisplayText()); + logger.debug(String.format("Checking if BrocadeVcsElement can handle service %s on network %s", service.getName(), network)); if (network.getBroadcastDomainType() != BroadcastDomainType.Vcs) { return false; } if (!_networkModel.isProviderForNetwork(getProvider(), network.getId())) { - logger.debug("BrocadeVcsElement is not a provider for network " + network.getDisplayText()); + logger.debug(String.format("BrocadeVcsElement is not a provider for network %s", network)); return false; } if (!_ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), service, Network.Provider.BrocadeVcs)) { - logger.debug("BrocadeVcsElement can't provide the " + service.getName() + " service on network " + network.getDisplayText()); + logger.debug(String.format("BrocadeVcsElement can't provide the %s service on network %s", service.getName(), network)); return false; } @@ -164,7 +164,7 @@ public class BrocadeVcsElement extends AdapterBase implements NetworkElement, Re @Override public boolean implement(Network network, 
NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - logger.debug("entering BrocadeVcsElement implement function for network " + network.getDisplayText() + " (state " + network.getState() + ")"); + logger.debug(String.format("entering BrocadeVcsElement implement function for network %s (state %s)", network, network.getState())); if (!canHandle(network, Service.Connectivity)) { return false; @@ -276,11 +276,9 @@ public class BrocadeVcsElement extends AdapterBase implements NetworkElement, Re final PhysicalNetworkServiceProviderVO ntwkSvcProvider = _physicalNetworkServiceProviderDao.findByServiceProvider(physicalNetwork.getId(), networkDevice.getNetworkServiceProvder()); if (ntwkSvcProvider == null) { - throw new CloudRuntimeException("Network Service Provider: " + networkDevice.getNetworkServiceProvder() + " is not enabled in the physical network: " - + physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is not enabled in the physical network: %s to add this device", networkDevice.getNetworkServiceProvder(), physicalNetwork)); } else if (ntwkSvcProvider.getState() == PhysicalNetworkServiceProvider.State.Shutdown) { - throw new CloudRuntimeException("Network Service Provider: " + ntwkSvcProvider.getProviderName() + " is in shutdown state in the physical network: " - + physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is in shutdown state in the physical network: %s to add this device", ntwkSvcProvider.getProviderName(), physicalNetwork)); } Map params = new HashMap(); diff --git a/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/guru/BrocadeVcsGuestNetworkGuru.java b/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/guru/BrocadeVcsGuestNetworkGuru.java index 
8d2125d70eb..4fe75ba7519 100644 --- a/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/guru/BrocadeVcsGuestNetworkGuru.java +++ b/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/guru/BrocadeVcsGuestNetworkGuru.java @@ -142,7 +142,7 @@ public class BrocadeVcsGuestNetworkGuru extends GuestNetworkGuru { if (answer == null || !answer.getResult()) { logger.error("CreateNetworkCommand failed"); - logger.error("Unable to create network " + network.getId()); + logger.error(String.format("Unable to create network %s", network)); return null; } @@ -179,7 +179,7 @@ public class BrocadeVcsGuestNetworkGuru extends GuestNetworkGuru { if (answer == null || !answer.getResult()) { logger.error("AssociateMacToNetworkCommand failed"); - throw new InsufficientVirtualNetworkCapacityException("Unable to associate mac " + interfaceMac + " to network " + network.getId(), DataCenter.class, dc.getId()); + throw new InsufficientVirtualNetworkCapacityException(String.format("Unable to associate mac %s to network %s", interfaceMac, network), DataCenter.class, dc.getId()); } } @@ -204,7 +204,7 @@ public class BrocadeVcsGuestNetworkGuru extends GuestNetworkGuru { if (answer == null || !answer.getResult()) { logger.error("DisassociateMacFromNetworkCommand failed"); - logger.error("Unable to disassociate mac " + interfaceMac + " from network " + network.getId()); + logger.error(String.format("Unable to disassociate mac %s from network %s", interfaceMac, network)); return; } } @@ -232,7 +232,7 @@ public class BrocadeVcsGuestNetworkGuru extends GuestNetworkGuru { if (brocadeVcsNetworkVlanMapping != null) { vlanTag = brocadeVcsNetworkVlanMapping.getVlanId(); } else { - logger.error("Not able to find vlanId for network " + network.getId()); + logger.error(String.format("Not able to find vlanId for network %s", network)); return false; } @@ -250,7 +250,7 @@ public class BrocadeVcsGuestNetworkGuru extends GuestNetworkGuru { if (answer == null || 
!answer.getResult()) { logger.error("DeleteNetworkCommand failed"); - logger.error("Unable to delete network " + network.getId()); + logger.error(String.format("Unable to delete network %s", network)); return false; } } diff --git a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/cisco/CiscoVnmcControllerVO.java b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/cisco/CiscoVnmcControllerVO.java index 0f843d7f4d1..f03ea9a5656 100644 --- a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/cisco/CiscoVnmcControllerVO.java +++ b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/cisco/CiscoVnmcControllerVO.java @@ -16,6 +16,8 @@ // under the License. package com.cloud.network.cisco; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; + import java.util.UUID; import javax.persistence.Column; @@ -62,6 +64,13 @@ public class CiscoVnmcControllerVO implements CiscoVnmcController { this.uuid = UUID.randomUUID().toString(); } + @Override + public String toString() { + return String.format("CiscoVnmcController %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } + @Override public long getId() { return id; diff --git a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/element/CiscoVnmcElement.java b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/element/CiscoVnmcElement.java index bea5a2c3f25..2ec05f679ad 100644 --- a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/element/CiscoVnmcElement.java +++ b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/element/CiscoVnmcElement.java @@ -28,6 +28,7 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import javax.persistence.EntityExistsException; +import com.cloud.user.User; import org.apache.cloudstack.context.CallContext; import 
org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; @@ -280,24 +281,24 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro final List devices = _ciscoVnmcDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - logger.error("No Cisco Vnmc device on network " + network.getName()); + logger.error("No Cisco Vnmc device on network {}", network); return false; } List asaList = _ciscoAsa1000vDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (asaList.isEmpty()) { - logger.debug("No Cisco ASA 1000v device on network " + network.getName()); + logger.debug("No Cisco ASA 1000v device on network {}", network); return false; } NetworkAsa1000vMapVO asaForNetwork = _networkAsa1000vMapDao.findByNetworkId(network.getId()); if (asaForNetwork != null) { - logger.debug("Cisco ASA 1000v device already associated with network " + network.getName()); + logger.debug("Cisco ASA 1000v device already associated with network {}", network); return true; } if (!_networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.SourceNat, Provider.CiscoVnmc)) { - logger.error("SourceNat service is not provided by Cisco Vnmc device on network " + network.getName()); + logger.error("SourceNat service is not provided by Cisco Vnmc device on network {}", network); return false; } @@ -305,21 +306,21 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro // ensure that there is an ASA 1000v assigned to this network CiscoAsa1000vDevice assignedAsa = assignAsa1000vToNetwork(network); if (assignedAsa == null) { - logger.error("Unable to assign ASA 1000v device to network " + network.getName()); - throw new CloudRuntimeException("Unable to assign ASA 1000v device to network " + network.getName()); + logger.error("Unable to assign ASA 1000v device to network {}", network); + throw new 
CloudRuntimeException(String.format("Unable to assign ASA 1000v device to network %s", network)); } ClusterVO asaCluster = _clusterDao.findById(assignedAsa.getClusterId()); ClusterVSMMapVO clusterVsmMap = _clusterVsmMapDao.findByClusterId(assignedAsa.getClusterId()); if (clusterVsmMap == null) { - logger.error("Vmware cluster " + asaCluster.getName() + " has no Cisco Nexus VSM device associated with it"); - throw new CloudRuntimeException("Vmware cluster " + asaCluster.getName() + " has no Cisco Nexus VSM device associated with it"); + logger.error("Vmware cluster {} has no Cisco Nexus VSM device associated with it", asaCluster); + throw new CloudRuntimeException(String.format("Vmware cluster %s has no Cisco Nexus VSM device associated with it", asaCluster)); } CiscoNexusVSMDeviceVO vsmDevice = _vsmDeviceDao.findById(clusterVsmMap.getVsmId()); if (vsmDevice == null) { - logger.error("Unable to load details of Cisco Nexus VSM device associated with cluster " + asaCluster.getName()); - throw new CloudRuntimeException("Unable to load details of Cisco Nexus VSM device associated with cluster " + asaCluster.getName()); + logger.error("Unable to load details of Cisco Nexus VSM device associated with cluster {}", asaCluster); + throw new CloudRuntimeException(String.format("Unable to load details of Cisco Nexus VSM device associated with cluster %s", asaCluster)); } CiscoVnmcControllerVO ciscoVnmcDevice = devices.get(0); @@ -350,8 +351,8 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro if (outsideIp == null) { // none available, acquire one try { Account caller = CallContext.current().getCallingAccount(); - long callerUserId = CallContext.current().getCallingUserId(); - outsideIp = _ipAddrMgr.allocateIp(owner, false, caller, callerUserId, zone, true, null); + User callerUser = CallContext.current().getCallingUser(); + outsideIp = _ipAddrMgr.allocateIp(owner, false, caller, callerUser, zone, true, null); } catch 
(ResourceAllocationException e) { logger.error("Unable to allocate additional public Ip address. Exception details " + e); throw new CloudRuntimeException("Unable to allocate additional public Ip address. Exception details " + e); @@ -373,29 +374,27 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro // all public ip addresses must be from same subnet, this essentially means single public subnet in zone if (!createLogicalEdgeFirewall(vlanId, network.getGateway(), gatewayNetmask, outsideIp.getAddress().addr(), sourceNatIp.getNetmask(), publicGateways, ciscoVnmcHost.getId())) { - logger.error("Failed to create logical edge firewall in Cisco VNMC device for network " + network.getName()); - throw new CloudRuntimeException("Failed to create logical edge firewall in Cisco VNMC device for network " + network.getName()); + logger.error("Failed to create logical edge firewall in Cisco VNMC device for network {}", network); + throw new CloudRuntimeException(String.format("Failed to create logical edge firewall in Cisco VNMC device for network %s", network)); } // create stuff in VSM for ASA device if (!configureNexusVsmForAsa(vlanId, network.getGateway(), vsmDevice.getUserName(), vsmDevice.getPassword(), vsmDevice.getipaddr(), assignedAsa.getInPortProfile(), ciscoVnmcHost.getId())) { - logger.error("Failed to configure Cisco Nexus VSM " + vsmDevice.getipaddr() + " for ASA device for network " + network.getName()); - throw new CloudRuntimeException("Failed to configure Cisco Nexus VSM " + vsmDevice.getipaddr() + " for ASA device for network " + network.getName()); + logger.error("Failed to configure Cisco Nexus VSM {} for ASA device for network {}", vsmDevice.getipaddr(), network); + throw new CloudRuntimeException(String.format("Failed to configure Cisco Nexus VSM %s for ASA device for network %s", vsmDevice.getipaddr(), network)); } // configure source NAT if (!configureSourceNat(vlanId, network.getCidr(), sourceNatIp, 
ciscoVnmcHost.getId())) { - logger.error("Failed to configure source NAT in Cisco VNMC device for network " + network.getName()); - throw new CloudRuntimeException("Failed to configure source NAT in Cisco VNMC device for network " + network.getName()); + logger.error("Failed to configure source NAT in Cisco VNMC device for network {}", network); + throw new CloudRuntimeException(String.format("Failed to configure source NAT in Cisco VNMC device for network %s", network)); } // associate Asa 1000v instance with logical edge firewall if (!associateAsaWithLogicalEdgeFirewall(vlanId, assignedAsa.getManagementIp(), ciscoVnmcHost.getId())) { - logger.error("Failed to associate Cisco ASA 1000v (" + assignedAsa.getManagementIp() + ") with logical edge firewall in VNMC for network " + - network.getName()); - throw new CloudRuntimeException("Failed to associate Cisco ASA 1000v (" + assignedAsa.getManagementIp() + - ") with logical edge firewall in VNMC for network " + network.getName()); + logger.error("Failed to associate Cisco ASA 1000v ({}) with logical edge firewall in VNMC for network {}", assignedAsa.getManagementIp(), network); + throw new CloudRuntimeException(String.format("Failed to associate Cisco ASA 1000v (%s) with logical edge firewall in VNMC for network %s", assignedAsa.getManagementIp(), network)); } } catch (CloudRuntimeException e) { unassignAsa1000vFromNetwork(network); @@ -514,11 +513,11 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro final PhysicalNetworkServiceProviderVO ntwkSvcProvider = _physicalNetworkServiceProviderDao.findByServiceProvider(physicalNetwork.getId(), networkDevice.getNetworkServiceProvder()); if (ntwkSvcProvider == null) { - throw new CloudRuntimeException("Network Service Provider: " + networkDevice.getNetworkServiceProvder() + " is not enabled in the physical network: " + - physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is 
not enabled in the physical network: %s to add this device", + networkDevice.getNetworkServiceProvder(), physicalNetwork)); } else if (ntwkSvcProvider.getState() == PhysicalNetworkServiceProvider.State.Shutdown) { - throw new CloudRuntimeException("Network Service Provider: " + ntwkSvcProvider.getProviderName() + " is in shutdown state in the physical network: " + - physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is in shutdown state in the physical network: %s to add this device", + ntwkSvcProvider.getProviderName(), physicalNetwork)); } if (_ciscoVnmcDao.listByPhysicalNetwork(physicalNetworkId).size() != 0) { @@ -590,7 +589,7 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro if (physicalNetwork != null) { List responseList = _ciscoAsa1000vDao.listByPhysicalNetwork(physicalNetworkId); if (responseList.size() > 0) { - throw new CloudRuntimeException("Cisco VNMC appliance with id " + vnmcResourceId + " cannot be deleted as there Cisco ASA 1000v appliances using it"); + throw new CloudRuntimeException(String.format("Cisco VNMC appliance %s cannot be deleted as there are Cisco ASA 1000v appliances using it", vnmcResource)); } } @@ -617,7 +616,7 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro if (ciscoVnmcResourceId != null) { CiscoVnmcControllerVO ciscoVnmcResource = _ciscoVnmcDao.findById(ciscoVnmcResourceId); if (ciscoVnmcResource == null) { - throw new InvalidParameterValueException("Could not find Cisco Vnmc device with id: " + ciscoVnmcResource); + throw new InvalidParameterValueException(String.format("Could not find Cisco Vnmc device with id: %d", ciscoVnmcResourceId)); } responseList.add(ciscoVnmcResource); } else { @@ -640,27 +639,26 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro public boolean applyFWRules(Network network, List rules) throws ResourceUnavailableException { if
(!_networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.Firewall, Provider.CiscoVnmc)) { - logger.error("Firewall service is not provided by Cisco Vnmc device on network " + network.getName()); + logger.error("Firewall service is not provided by Cisco Vnmc device on network {}", network); return false; } // Find VNMC host for physical network List devices = _ciscoVnmcDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - logger.error("No Cisco Vnmc device on network " + network.getName()); + logger.error("No Cisco Vnmc device on network {}", network); return true; } // Find if ASA 1000v is associated with network NetworkAsa1000vMapVO asaForNetwork = _networkAsa1000vMapDao.findByNetworkId(network.getId()); if (asaForNetwork == null) { - logger.debug("Cisco ASA 1000v device is not associated with network " + network.getName()); + logger.debug("Cisco ASA 1000v device is not associated with network {}", network); return true; } if (network.getState() == Network.State.Allocated) { - logger.debug("External firewall was asked to apply firewall rules for network with ID " + network.getId() + - "; this network is not implemented. Skipping backend commands."); + logger.debug("External firewall was asked to apply firewall rules for network {}; this network is not implemented. 
Skipping backend commands.", network); return true; } @@ -698,27 +696,26 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro public boolean applyPFRules(Network network, List rules) throws ResourceUnavailableException { if (!_networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.PortForwarding, Provider.CiscoVnmc)) { - logger.error("Port forwarding service is not provided by Cisco Vnmc device on network " + network.getName()); + logger.error("Port forwarding service is not provided by Cisco Vnmc device on network {}", network); return false; } // Find VNMC host for physical network List devices = _ciscoVnmcDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - logger.error("No Cisco Vnmc device on network " + network.getName()); + logger.error("No Cisco Vnmc device on network {}", network); return true; } // Find if ASA 1000v is associated with network NetworkAsa1000vMapVO asaForNetwork = _networkAsa1000vMapDao.findByNetworkId(network.getId()); if (asaForNetwork == null) { - logger.debug("Cisco ASA 1000v device is not associated with network " + network.getName()); + logger.debug("Cisco ASA 1000v device is not associated with network {}", network); return true; } if (network.getState() == Network.State.Allocated) { - logger.debug("External firewall was asked to apply port forwarding rules for network with ID " + network.getId() + - "; this network is not implemented. Skipping backend commands."); + logger.debug("External firewall was asked to apply port forwarding rules for network with ID {}; this network is not implemented. 
Skipping backend commands.", network); return true; } @@ -752,27 +749,26 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro @Override public boolean applyStaticNats(Network network, List rules) throws ResourceUnavailableException { if (!_networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.StaticNat, Provider.CiscoVnmc)) { - logger.error("Static NAT service is not provided by Cisco Vnmc device on network " + network.getName()); + logger.error("Static NAT service is not provided by Cisco Vnmc device on network {}", network); return false; } // Find VNMC host for physical network List devices = _ciscoVnmcDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - logger.error("No Cisco Vnmc device on network " + network.getName()); + logger.error("No Cisco Vnmc device on network {}", network); return true; } // Find if ASA 1000v is associated with network NetworkAsa1000vMapVO asaForNetwork = _networkAsa1000vMapDao.findByNetworkId(network.getId()); if (asaForNetwork == null) { - logger.debug("Cisco ASA 1000v device is not associated with network " + network.getName()); + logger.debug("Cisco ASA 1000v device is not associated with network {}", network); return true; } if (network.getState() == Network.State.Allocated) { - logger.debug("External firewall was asked to apply static NAT rules for network with ID " + network.getId() + - "; this network is not implemented. Skipping backend commands."); + logger.debug("External firewall was asked to apply static NAT rules for network with ID {}; this network is not implemented. 
Skipping backend commands.", network); return true; } @@ -878,7 +874,7 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro NetworkAsa1000vMapVO networkAsaMap = _networkAsa1000vMapDao.findByAsa1000vId(asaResource.getId()); if (networkAsaMap != null) { - throw new CloudRuntimeException("Cisco ASA 1000v appliance with id " + asaResourceId + " cannot be deleted as it is associated with guest network"); + throw new CloudRuntimeException(String.format("Cisco ASA 1000v appliance %s cannot be deleted as it is associated with guest network", asaResource)); } _ciscoAsa1000vDao.remove(asaResourceId); diff --git a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/element/ElasticLoadBalancerElement.java b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/element/ElasticLoadBalancerElement.java index c1ea7823811..06ff497dfd2 100644 --- a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/element/ElasticLoadBalancerElement.java +++ b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/element/ElasticLoadBalancerElement.java @@ -72,7 +72,7 @@ public class ElasticLoadBalancerElement extends AdapterBase implements LoadBalan private boolean canHandle(Network network, List rules) { if (network.getGuestType() != Network.GuestType.Shared || network.getTrafficType() != TrafficType.Guest) { - logger.debug("Not handling network with type " + network.getGuestType() + " and traffic type " + network.getTrafficType()); + logger.debug("Not handling network {} with type {} and traffic type {}", network, network.getGuestType(), network.getTrafficType()); return false; } diff --git a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java index c7f4b8bf244..c02d8cf67aa 100644 --- 
a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java +++ b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java @@ -247,7 +247,7 @@ public class ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast DomainRouterVO elbVm = findElbVmForLb(rules.get(0)); if (elbVm == null) { - logger.warn("Unable to apply lb rules, ELB vm doesn't exist in the network " + network.getId()); + logger.warn("Unable to apply lb rules, ELB vm doesn't exist in the network {}", network); throw new ResourceUnavailableException("Unable to apply lb rules", DataCenter.class, network.getDataCenterId()); } @@ -267,10 +267,10 @@ public class ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast } return applyLBRules(elbVm, lbRules, network.getId()); } else if (elbVm.getState() == State.Stopped || elbVm.getState() == State.Stopping) { - logger.debug("ELB VM is in " + elbVm.getState() + ", so not sending apply LoadBalancing rules commands to the backend"); + logger.debug(String.format("ELB VM %s is in %s, so not sending apply LoadBalancing rules commands to the backend", elbVm, elbVm.getState())); return true; } else { - logger.warn("Unable to apply loadbalancing rules, ELB VM is not in the right state " + elbVm.getState()); + logger.warn(String.format("Unable to apply loadbalancing rules, ELB VM %s is not in the right state %s", elbVm, elbVm.getState())); throw new ResourceUnavailableException("Unable to apply loadbalancing rules, ELB VM is not in the right state", VirtualRouter.class, elbVm.getId()); } } diff --git a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/LoadBalanceRuleHandler.java b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/LoadBalanceRuleHandler.java index 6812fa49532..4e331891485 100644 --- 
a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/LoadBalanceRuleHandler.java +++ b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/LoadBalanceRuleHandler.java @@ -27,6 +27,7 @@ import java.util.Random; import javax.inject.Inject; +import com.cloud.network.dao.PhysicalNetworkDao; import org.apache.cloudstack.api.command.user.loadbalancer.CreateLoadBalancerRuleCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; @@ -124,6 +125,8 @@ public class LoadBalanceRuleHandler { @Inject private IpAddressManager _ipAddrMgr; @Inject + private PhysicalNetworkDao physicalNetworkDao; + @Inject protected NetworkDao _networkDao; @Inject protected NetworkOfferingDao _networkOfferingDao; @@ -230,7 +233,7 @@ public class LoadBalanceRuleHandler { guestNetwork = _networkDao.acquireInLockTable(guestNetworkId); if (guestNetwork == null) { - throw new ConcurrentOperationException("Unable to acquire network lock: " + guestNetworkId); + throw new ConcurrentOperationException(String.format("Unable to acquire lock for the network with id: %d", guestNetworkId)); } try { @@ -272,11 +275,12 @@ public class LoadBalanceRuleHandler { final Long physicalNetworkId = _networkModel.getPhysicalNetworkId(guestNetwork); final PhysicalNetworkServiceProvider provider = _physicalProviderDao.findByServiceProvider(physicalNetworkId, typeString); if (provider == null) { - throw new CloudRuntimeException("Cannot find service provider " + typeString + " in physical network " + physicalNetworkId); + throw new CloudRuntimeException(String.format("Cannot find service provider %s in physical network %s with id %d", + typeString, physicalNetworkDao.findById(physicalNetworkId), physicalNetworkId)); } final VirtualRouterProvider vrProvider = _vrProviderDao.findByNspIdAndType(provider.getId(), Type.ElasticLoadBalancerVm); if (vrProvider == null) { - throw new
CloudRuntimeException("Cannot find virtual router provider " + typeString + " as service provider " + provider.getId()); + throw new CloudRuntimeException(String.format("Cannot find virtual router provider %s as service provider %s", typeString, provider)); } long userId = CallContext.current().getCallingUserId(); @@ -314,7 +318,7 @@ public class LoadBalanceRuleHandler { final IPAddressVO ipvo = _ipAddressDao.findById(ipId); ipvo.setAssociatedWithNetworkId(null); _ipAddressDao.update(ipvo.getId(), ipvo); - _ipAddrMgr.disassociatePublicIpAddress(ipId, userId, caller); + _ipAddrMgr.disassociatePublicIpAddress(ipvo, userId, caller); _ipAddressDao.unassignIpAddress(ipId); } diff --git a/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java b/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java index 0a9b4a7131a..27a06899e01 100644 --- a/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java +++ b/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java @@ -347,8 +347,7 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala //2.3 Apply Internal LB rules on the VM if (!_internalLbMgr.applyLoadBalancingRules(network, entry.getValue(), internalLbVms)) { - throw new CloudRuntimeException("Failed to apply load balancing rules for ip " + sourceIp.addr() + - " in network " + network.getId() + " on element " + getName()); + throw new CloudRuntimeException(String.format("Failed to apply load balancing rules for ip %s in network %s on element %s", sourceIp.addr(), network, getName())); } } diff --git a/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java 
b/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java index 9a5c5a7c6a9..9469af7eb75 100644 --- a/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java +++ b/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java @@ -300,7 +300,7 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In if (answer != null && answer instanceof GetDomRVersionAnswer) { final GetDomRVersionAnswer versionAnswer = (GetDomRVersionAnswer)answer; if (answer == null || !answer.getResult()) { - logger.warn("Unable to get the template/scripts version of internal LB VM " + internalLbVm.getInstanceName() + " due to: " + versionAnswer.getDetails()); + logger.warn(String.format("Unable to get the template/scripts version of internal LB VM %s due to: %s", internalLbVm, versionAnswer.getDetails())); result = false; } else { internalLbVm.setTemplateVersion(versionAnswer.getTemplateVersion()); @@ -606,11 +606,11 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In List internalLbVms = new ArrayList(); final Network lock = _networkDao.acquireInLockTable(guestNetwork.getId(), NetworkOrchestrationService.NetworkLockTimeout.value()); if (lock == null) { - throw new ConcurrentOperationException("Unable to lock network " + guestNetwork.getId()); + throw new ConcurrentOperationException(String.format("Unable to lock network %s", guestNetwork)); } if (logger.isDebugEnabled()) { - logger.debug("Lock is acquired for network id " + lock.getId() + " as a part of internal lb startup in " + dest); + logger.debug(String.format("Lock is acquired for network %s as a part of internal lb startup in %s", lock, dest)); } final long internalLbProviderId = getInternalLbProviderId(guestNetwork); @@ -647,7 +647,7 @@ public class 
InternalLoadBalancerVMManagerImpl extends ManagerBase implements In if (lock != null) { _networkDao.releaseFromLockTable(lock.getId()); if (logger.isDebugEnabled()) { - logger.debug("Lock is released for network id " + lock.getId() + " as a part of internal lb vm startup in " + dest); + logger.debug(String.format("Lock is released for network %s as a part of internal lb vm startup in %s", lock, dest)); } } } @@ -665,7 +665,7 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In final VirtualRouterProvider internalLbProvider = _vrProviderDao.findByNspIdAndType(provider.getId(), type); if (internalLbProvider == null) { - throw new CloudRuntimeException("Cannot find provider " + type.toString() + " as service provider " + provider.getId()); + throw new CloudRuntimeException(String.format("Cannot find provider %s as service provider %s", type.toString(), provider)); } return internalLbProvider.getId(); @@ -880,10 +880,10 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In if (lbVm.getState() == State.Running) { return sendLBRules(lbVm, rules, network.getId()); } else if (lbVm.getState() == State.Stopped || lbVm.getState() == State.Stopping) { - logger.debug("Internal LB VM " + lbVm.getInstanceName() + " is in " + lbVm.getState() + ", so not sending apply lb rules commands to the backend"); + logger.debug(String.format("Internal LB VM %s is in %s, so not sending apply lb rules commands to the backend", lbVm, lbVm.getState())); return true; } else { - logger.warn("Unable to apply lb rules, Internal LB VM is not in the right state " + lbVm.getState()); + logger.warn(String.format("Unable to apply lb rules, Internal LB VM %s is not in the right state %s", lbVm, lbVm.getState())); throw new ResourceUnavailableException("Unable to apply lb rules; Internal LB VM is not in the right state", DataCenter.class, lbVm.getDataCenterId()); } } diff --git
a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailElementImpl.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailElementImpl.java index 44cbc6c305f..c16f8e3601c 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailElementImpl.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailElementImpl.java @@ -119,9 +119,9 @@ public class ContrailElementImpl extends AdapterBase @Override public boolean implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - logger.debug("NetworkElement implement: " + network.getName() + ", traffic type: " + network.getTrafficType()); + logger.debug(String.format("NetworkElement implement: %s, traffic type: %s", network, network.getTrafficType())); if (network.getTrafficType() == TrafficType.Guest) { - logger.debug("ignore network " + network.getName()); + logger.debug(String.format("ignore network %s", network)); return true; } VirtualNetworkModel vnModel = _manager.getDatabase().lookupVirtualNetwork(network.getUuid(), _manager.getCanonicalName(network), network.getTrafficType()); @@ -145,15 +145,13 @@ public class ContrailElementImpl extends AdapterBase public boolean prepare(Network network, NicProfile nicProfile, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - logger.debug("NetworkElement prepare: " + network.getName() + ", traffic type: " + network.getTrafficType()); + logger.debug(String.format("NetworkElement prepare: %s, traffic type: %s", network, network.getTrafficType())); if 
(network.getTrafficType() == TrafficType.Guest) { - logger.debug("ignore network " + network.getName()); + logger.debug(String.format("ignore network %s", network)); return true; } - logger.debug("network: " + network.getId()); - VirtualNetworkModel vnModel = _manager.getDatabase().lookupVirtualNetwork(network.getUuid(), _manager.getCanonicalName(network), network.getTrafficType()); if (vnModel == null) { @@ -210,7 +208,7 @@ public class ContrailElementImpl extends AdapterBase if (network.getTrafficType() == TrafficType.Guest) { return true; } else if (!_manager.isManagedPhysicalNetwork(network)) { - logger.debug("release ignore network " + network.getId()); + logger.debug(String.format("release ignore network %s", network)); return true; } @@ -219,7 +217,7 @@ public class ContrailElementImpl extends AdapterBase VirtualMachineModel vmModel = _manager.getDatabase().lookupVirtualMachine(vm.getUuid()); if (vmModel == null) { - logger.debug("vm " + vm.getInstanceName() + " not in local database"); + logger.debug(String.format("vm %s not in local database", vm)); return true; } VMInterfaceModel vmiModel = vmModel.getVMInterface(nic.getUuid()); @@ -272,7 +270,7 @@ public class ContrailElementImpl extends AdapterBase List systemNets = _manager.findSystemNetworks(types); if (systemNets != null && !systemNets.isEmpty()) { for (NetworkVO net: systemNets) { - logger.debug("update system network service: " + net.getName() + "; service provider: " + serviceMap); + logger.debug(String.format("update system network service: %s; service provider: %s", net, serviceMap)); _networksDao.update(net.getId(), net, serviceMap); } } else { @@ -284,7 +282,7 @@ public class ContrailElementImpl extends AdapterBase systemNets = _manager.findSystemNetworks(types); if (systemNets != null && !systemNets.isEmpty()) { for (NetworkVO net: systemNets) { - logger.debug("update system network service: " + net.getName() + "; service provider: " + serviceMap); + logger.debug(String.format("update system 
network service: %s; service provider: %s", net, serviceMap)); _networksDao.update(net.getId(), net, serviceMap); } } else { diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailGuru.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailGuru.java index 345cdc1e6c6..9346abab21d 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailGuru.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailGuru.java @@ -132,7 +132,7 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { network.setCidr(userSpecified.getCidr()); network.setGateway(userSpecified.getGateway()); } - logger.debug("Allocated network " + userSpecified.getName() + (network.getCidr() == null ? "" : " subnet: " + network.getCidr())); + logger.debug("Allocated network {}{}", userSpecified, network.getCidr() == null ? 
"" : " subnet: " + network.getCidr()); return network; } @@ -144,7 +144,7 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { @Override public Network implement(Network network, NetworkOffering offering, DeployDestination destination, ReservationContext context) throws InsufficientVirtualNetworkCapacityException { - logger.debug("Implement network: " + network.getName() + ", traffic type: " + network.getTrafficType()); + logger.debug("Implement network: {}, traffic type: {}", network, network.getTrafficType()); VirtualNetworkModel vnModel = _manager.getDatabase().lookupVirtualNetwork(network.getUuid(), _manager.getCanonicalName(network), network.getTrafficType()); if (vnModel == null) { @@ -191,7 +191,7 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { @Override public NicProfile allocate(Network network, NicProfile profile, VirtualMachineProfile vm) throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException, ConcurrentOperationException { - logger.debug("allocate NicProfile on " + network.getName()); + logger.debug(String.format("allocate NicProfile on %s", network)); if (profile != null && profile.getRequestedIPv4() != null) { throw new CloudRuntimeException("Does not support custom ip allocation at this time: " + profile); @@ -218,8 +218,8 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { @Override public void reserve(NicProfile nic, Network network, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException, ConcurrentOperationException { - logger.debug("reserve NicProfile on network id: " + network.getId() + " " + network.getName()); - logger.debug("deviceId: " + nic.getDeviceId()); + logger.debug("reserve NicProfile on network: " + network); + logger.debug(String.format("nic: %s deviceId: %d", nic, nic.getDeviceId())); NicVO nicVO = 
_nicDao.findById(nic.getId()); assert nicVO != null; @@ -275,7 +275,7 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { if (nic.getMacAddress() == null) { MacAddressesType macs = vmi.getMacAddresses(); if (macs == null) { - logger.debug("no mac address is allocated for Nic " + nicVO.getUuid()); + logger.debug(String.format("no mac address is allocated for Nic %s", nicVO)); } else { logger.info("VMI " + _manager.getVifNameByVmUuid(vm.getUuid(), nicVO.getDeviceId()) + " got mac address: " + macs.getMacAddress().get(0)); nic.setMacAddress(macs.getMacAddress().get(0)); @@ -299,7 +299,7 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { @Override public boolean release(NicProfile nic, VirtualMachineProfile vm, String reservationId) { - logger.debug("release NicProfile " + nic.getId()); + logger.debug(String.format("release NicProfile %s", nic)); return true; } @@ -309,7 +309,7 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { */ @Override public void deallocate(Network network, NicProfile nic, VirtualMachineProfile vm) { - logger.debug("deallocate NicProfile " + nic.getId() + " on " + network.getName()); + logger.debug(String.format("deallocate NicProfile %s on %s", nic, network.getName())); NicVO nicVO = _nicDao.findById(nic.getId()); assert nicVO != null; @@ -343,7 +343,7 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { @Override public void updateNicProfile(NicProfile profile, Network network) { // TODO Auto-generated method stub - logger.debug("update NicProfile " + profile.getId() + " on " + network.getName()); + logger.debug(String.format("update NicProfile %s on %s", profile, network)); } @Override diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ManagementNetworkGuru.java 
b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ManagementNetworkGuru.java index dc453f75c15..29d0b3a51e8 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ManagementNetworkGuru.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ManagementNetworkGuru.java @@ -121,7 +121,7 @@ public class ManagementNetworkGuru extends ContrailGuru { network.setCidr(_mgmtCidr); network.setGateway(_mgmtGateway); } - logger.debug("Allocated network " + userSpecified.getName() + (network.getCidr() == null ? "" : " subnet: " + network.getCidr())); + logger.debug("Allocated network " + userSpecified + (network.getCidr() == null ? "" : " subnet: " + network.getCidr())); return network; } diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualMachineModel.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualMachineModel.java index 479ef2a0e5d..7a1833302d6 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualMachineModel.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualMachineModel.java @@ -79,7 +79,7 @@ public class VirtualMachineModel extends ModelObjectBase { setProperties(controller, instance); UserVm userVm = controller.getVmDao().findById(instance.getId()); if (userVm != null && userVm.getUserData() != null) { - logger.debug("vm " + instance.getInstanceName() + " user data: " + userVm.getUserData()); + logger.debug(String.format("vm %s user data: %s", instance, userVm.getUserData())); final Gson json = new Gson(); Map kvmap = json.fromJson(userVm.getUserData(), new TypeToken>() { }.getType()); @@ -99,7 +99,7 @@ public class 
VirtualMachineModel extends ModelObjectBase { } else { // Throw a CloudRuntimeException in case the UUID is not valid. String message = "Invalid UUID ({0}) given for the service-instance for VM {1}."; - message = MessageFormat.format(message, instance.getId(), serviceUuid); + message = MessageFormat.format(message, serviceUuid, instance); logger.warn(message); throw new CloudRuntimeException(message); } diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualNetworkModel.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualNetworkModel.java index 08a4609c43e..c27c57516e0 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualNetworkModel.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualNetworkModel.java @@ -413,7 +413,8 @@ public class VirtualNetworkModel extends ModelObjectBase { diff.removeAll(vncSubnets); if (!diff.isEmpty()) { - logger.debug("Subnets changed, network: " + _name + "; db: " + dbSubnets + ", vnc: " + vncSubnets + ", diff: " + diff); + logger.debug(String.format("Subnets changed, network: [id: %d, uuid: %s, name: %s]; " + + "db: %s, vnc: %s, diff: %s", _id, _uuid, _name, dbSubnets, vncSubnets, diff)); return false; } @@ -500,7 +501,7 @@ public class VirtualNetworkModel extends ModelObjectBase { diff.removeAll(newSubnets); if (!diff.isEmpty()) { - logger.debug("Subnets differ, network: " + _name + "; db: " + currentSubnets + ", vnc: " + newSubnets + ", diff: " + diff); + logger.debug(String.format("Subnets differ, network: [id: %d, uuid: %s, name: %s]; db: %s, vnc: %s, diff: %s", _id, _uuid, _name, currentSubnets, newSubnets, diff)); return false; } diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/element/NetscalerElement.java 
b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/element/NetscalerElement.java index 48b9006f34c..096b400ee93 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/element/NetscalerElement.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/element/NetscalerElement.java @@ -327,16 +327,14 @@ IpDeployer, StaticNatServiceProvider, GslbServiceProvider { // allocate a load balancer device for the network lbDeviceVO = allocateNCCResourceForNetwork(guestConfig); if (lbDeviceVO == null) { - String msg = "failed to allocate Netscaler ControlCenter Resource for the zone in the network " - + guestConfig.getId(); + String msg = String.format("failed to allocate Netscaler ControlCenter Resource for the zone in the network %s", guestConfig); logger.error(msg); throw new InsufficientNetworkCapacityException(msg, DataCenter.class, guestConfig.getDataCenterId()); } } netscalerControlCenter = _hostDao.findById(lbDeviceVO.getId()); - logger.debug("Allocated Netscaler Control Center device:" + lbDeviceVO.getId() + " for the network: " - + guestConfig.getId()); + logger.debug("Allocated Netscaler Control Center device: {} for the network: {}", lbDeviceVO, guestConfig); } else { // find the load balancer device allocated for the network diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/vm/NetScalerVMManagerImpl.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/vm/NetScalerVMManagerImpl.java index 7b2ef012bed..c3d4cf4b24e 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/vm/NetScalerVMManagerImpl.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/vm/NetScalerVMManagerImpl.java @@ -269,13 +269,13 @@ public class NetScalerVMManagerImpl extends ManagerBase implements NetScalerVMMa ServiceOfferingVO vpxOffering = _serviceOfferingDao.findById(svcOffId); //using 2GB and 2CPU offering if(vpxOffering.getRamSize() < 
2048 && vpxOffering.getCpu() <2 ) { - throw new InvalidParameterValueException("Specified Service Offering :" + vpxOffering.getUuid() + " NS Vpx cannot be deployed. Min 2GB Ram and 2 CPU are required"); + throw new InvalidParameterValueException(String.format("Specified Service Offering: %s NS Vpx cannot be deployed. Min 2GB Ram and 2 CPU are required", vpxOffering)); } long userId = CallContext.current().getCallingUserId(); //TODO change the os bits from 142 103 to the actual guest of bits if(template.getGuestOSId() != 103 ) { - throw new InvalidParameterValueException("Specified Template " + template.getUuid()+ " not suitable for NS VPX Deployment. Please register the template with guest os type as unknow(64-bit)"); + throw new InvalidParameterValueException(String.format("Specified Template %s not suitable for NS VPX Deployment. Please register the template with guest os type as unknown(64-bit)", template)); } NetworkVO defaultNetwork = null; diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/element/NiciraNvpElement.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/element/NiciraNvpElement.java index 356b452a9e4..278d058a289 100644 --- a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/element/NiciraNvpElement.java +++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/element/NiciraNvpElement.java @@ -187,18 +187,18 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { } protected boolean canHandle(Network network, Service service) { - logger.debug("Checking if NiciraNvpElement can handle service " + service.getName() + " on network " + network.getDisplayText()); + logger.debug(String.format("Checking if NiciraNvpElement can handle service %s on network %s", service.getName(), network)); if (network.getBroadcastDomainType() != BroadcastDomainType.Lswitch) { return false; } if (!networkModel.isProviderForNetwork(getProvider(), network.getId())) { - 
logger.debug("NiciraNvpElement is not a provider for network " + network.getDisplayText()); + logger.debug(String.format("NiciraNvpElement is not a provider for network %s", network)); return false; } if (!ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), service, Network.Provider.NiciraNvp)) { - logger.debug("NiciraNvpElement can't provide the " + service.getName() + " service on network " + network.getDisplayText()); + logger.debug(String.format("NiciraNvpElement can't provide the %s service on network %s", service.getName(), network)); return false; } @@ -215,7 +215,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { @Override public boolean implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - logger.debug("entering NiciraNvpElement implement function for network " + network.getDisplayText() + " (state " + network.getState() + ")"); + logger.debug(String.format("entering NiciraNvpElement implement function for network %s (state %s)", network, network.getState())); if (!canHandle(network, Service.Connectivity)) { return false; @@ -276,7 +276,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { context.getAccount().getAccountName()); CreateLogicalRouterAnswer answer = (CreateLogicalRouterAnswer)agentMgr.easySend(niciraNvpHost.getId(), cmd); if (answer.getResult() == false) { - logger.error("Failed to create Logical Router for network " + network.getDisplayText()); + logger.error(String.format("Failed to create Logical Router for network %s", network)); return false; } @@ -313,7 +313,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { new ConfigureSharedNetworkUuidCommand(lRouterUuid, lSwitchUuid, portIpAddress, ownerName, network.getId()); ConfigureSharedNetworkUuidAnswer answer = (ConfigureSharedNetworkUuidAnswer)agentMgr.easySend(niciraNvpHost.getId(), cmd); if 
(answer.getResult() == false) { - logger.error("Failed to configure Logical Router for Shared network " + network.getDisplayText()); + logger.error(String.format("Failed to configure Logical Router for Shared network %s", network)); return false; } return true; @@ -332,7 +332,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { new ConfigureSharedNetworkVlanIdCommand(lSwitchUuid, l2GatewayServiceUuid , vlanId, ownerName, network.getId()); ConfigureSharedNetworkVlanIdAnswer answer = (ConfigureSharedNetworkVlanIdAnswer)agentMgr.easySend(niciraNvpHost.getId(), cmd); if (answer.getResult() == false) { - logger.error("Failed to configure Shared network " + network.getDisplayText()); + logger.error(String.format("Failed to configure Shared network %s", network)); return false; } } @@ -431,7 +431,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { NiciraNvpNicMappingVO nicMap = niciraNvpNicMappingDao.findByNicUuid(nicVO.getUuid()); if (nicMap == null) { - logger.error("No mapping for nic " + nic.getName()); + logger.error(String.format("No mapping for nic %s", nic)); return false; } @@ -470,7 +470,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { // nat rules. NiciraNvpRouterMappingVO routermapping = niciraNvpRouterMappingDao.findByNetworkId(network.getId()); if (routermapping == null) { - logger.warn("No logical router uuid found for network " + network.getDisplayText()); + logger.warn(String.format("No logical router uuid found for network %s", network)); // This might be cause by a failed deployment, so don't make shutdown fail as well. 
return true; } @@ -478,7 +478,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { DeleteLogicalRouterCommand cmd = new DeleteLogicalRouterCommand(routermapping.getLogicalRouterUuid()); DeleteLogicalRouterAnswer answer = (DeleteLogicalRouterAnswer)agentMgr.easySend(niciraNvpHost.getId(), cmd); if (answer.getResult() == false) { - logger.error("Failed to delete LogicalRouter for network " + network.getDisplayText()); + logger.error(String.format("Failed to delete LogicalRouter for network %s", network)); return false; } @@ -582,11 +582,9 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { final PhysicalNetworkServiceProviderVO ntwkSvcProvider = physicalNetworkServiceProviderDao.findByServiceProvider(physicalNetwork.getId(), networkDevice.getNetworkServiceProvder()); if (ntwkSvcProvider == null) { - throw new CloudRuntimeException("Network Service Provider: " + networkDevice.getNetworkServiceProvder() + " is not enabled in the physical network: " + - physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is not enabled in the physical network: %s to add this device", networkDevice.getNetworkServiceProvder(), physicalNetwork)); } else if (ntwkSvcProvider.getState() == PhysicalNetworkServiceProvider.State.Shutdown) { - throw new CloudRuntimeException("Network Service Provider: " + ntwkSvcProvider.getProviderName() + " is in shutdown state in the physical network: " + - physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is in shutdown state in the physical network: %s to add this device", ntwkSvcProvider.getProviderName(), physicalNetwork)); } if (niciraNvpDao.listByPhysicalNetwork(physicalNetworkId).size() != 0) { @@ -814,7 +812,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { NiciraNvpRouterMappingVO routermapping = niciraNvpRouterMappingDao.findByNetworkId(network.getId()); if (routermapping == 
null) { - logger.error("No logical router uuid found for network " + network.getDisplayText()); + logger.error(String.format("No logical router uuid found for network %s", network)); return false; } @@ -858,7 +856,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { NiciraNvpRouterMappingVO routermapping = niciraNvpRouterMappingDao.findByNetworkId(network.getId()); if (routermapping == null) { - logger.error("No logical router uuid found for network " + network.getDisplayText()); + logger.error(String.format("No logical router uuid found for network %s", network)); return false; } @@ -898,7 +896,7 @@ NiciraNvpElementService, ResourceStateAdapter, IpDeployer { NiciraNvpRouterMappingVO routermapping = niciraNvpRouterMappingDao.findByNetworkId(network.getId()); if (routermapping == null) { - logger.error("No logical router uuid found for network " + network.getDisplayText()); + logger.error(String.format("No logical router uuid found for network %s", network)); return false; } diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java index daf2420b528..d366169fcc7 100644 --- a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java +++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java @@ -147,10 +147,10 @@ public class NiciraNvpGuestNetworkGuru extends GuestNetworkGuru implements Netwo final List devices = niciraNvpDao.listByPhysicalNetwork(physnet.getId()); if (devices.isEmpty()) { - logger.error("No NiciraNvp Controller on physical network " + physnet.getName()); + logger.error("No NiciraNvp Controller on physical network {}", physnet); return null; } - logger.debug("Nicira Nvp " + devices.get(0).getUuid() + " found on physical network " + physnet.getId()); + logger.debug("Nicira Nvp {} 
found on physical network {}", devices.get(0).getUuid(), physnet); logger.debug("Physical isolation type is supported, asking GuestNetworkGuru to design this network"); final NetworkVO networkObject = (NetworkVO) super.design(offering, plan, userSpecified, name, vpcId, owner); @@ -276,7 +276,7 @@ public class NiciraNvpGuestNetworkGuru extends GuestNetworkGuru implements Netwo public void shutdown(final NetworkProfile profile, final NetworkOffering offering) { final NetworkVO networkObject = networkDao.findById(profile.getId()); if (networkObject.getBroadcastDomainType() != BroadcastDomainType.Lswitch || networkObject.getBroadcastUri() == null) { - logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); + logger.warn(String.format("BroadcastUri is empty or incorrect for guest network %s", networkObject)); return; } @@ -308,7 +308,7 @@ public class NiciraNvpGuestNetworkGuru extends GuestNetworkGuru implements Netwo NiciraNvpRouterMappingVO routermapping = niciraNvpRouterMappingDao.findByNetworkId(networkObject.getId()); if (routermapping == null) { // Case 1: Numerical Vlan Provided -> No lrouter used. - logger.info("Shared Network " + networkObject.getDisplayText() + " didn't use Logical Router"); + logger.info(String.format("Shared Network %s didn't use Logical Router", networkObject)); } else { //Case 2: Logical Router's UUID provided as Vlan id -> Remove lrouter port but not lrouter. 
diff --git a/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxElement.java b/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxElement.java index 7673e5a6038..e1b37a8d653 100644 --- a/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxElement.java +++ b/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxElement.java @@ -405,7 +405,7 @@ public class NsxElement extends AdapterBase implements DhcpServiceProvider, Dns boolean forNsx = false; List physicalNetworks = physicalNetworkDao.listByZoneAndTrafficType(zone.getId(), Networks.TrafficType.Guest); if (CollectionUtils.isNullOrEmpty(physicalNetworks)) { - String err = String.format("Desired physical network is not present in the zone %s for traffic type %s. ", zone.getName(), Networks.TrafficType.Guest.name()); + String err = String.format("Desired physical network is not present in the zone %s for traffic type %s. ", zone, Networks.TrafficType.Guest.name()); logger.error(err); throw new InvalidConfigurationException(err); } @@ -498,11 +498,10 @@ public class NsxElement extends AdapterBase implements DhcpServiceProvider, Dns } protected boolean canHandle(Network network, Network.Service service) { - logger.debug("Checking if Nsx Element can handle service " + service.getName() + " on network " - + network.getDisplayText()); + logger.debug("Checking if Nsx Element can handle service {} on network {}", service.getName(), network); if (!networkModel.isProviderForNetwork(getProvider(), network.getId())) { - logger.debug("Nsx Element is not a provider for network " + network.getDisplayText()); + logger.debug("Nsx Element is not a provider for network {}", network); return false; } @@ -577,16 +576,16 @@ public class NsxElement extends AdapterBase implements DhcpServiceProvider, Dns if (Arrays.asList(FirewallRule.State.Add, FirewallRule.State.Active).contains(rule.getState())) { if ((ruleDetail == null && 
FirewallRule.State.Add == rule.getState()) || (ruleDetail != null && !ruleDetail.getValue().equalsIgnoreCase("true"))) { logger.debug("Creating port forwarding rule on NSX for VM {} to ports {} - {}", - vm.getUuid(), rule.getDestinationPortStart(), rule.getDestinationPortEnd()); + vm, rule.getDestinationPortStart(), rule.getDestinationPortEnd()); NsxAnswer answer = nsxService.createPortForwardRule(networkRule); boolean pfRuleResult = answer.getResult(); if (pfRuleResult && !answer.isObjectExistent()) { - logger.debug("Port forwarding rule {} created on NSX, adding detail on firewall rules details", rule.getId()); + logger.debug("Port forwarding rule {} created on NSX, adding detail on firewall rules details", rule); if (ruleDetail == null && FirewallRule.State.Add == rule.getState()) { - logger.debug("Adding new firewall detail for rule {}", rule.getId()); + logger.debug("Adding new firewall detail for rule {}", rule); firewallRuleDetailsDao.addDetail(rule.getId(), ApiConstants.FOR_NSX, "true", false); } else { - logger.debug("Updating firewall detail for rule {}", rule.getId()); + logger.debug("Updating firewall detail for rule {}", rule); ruleDetail.setValue("true"); firewallRuleDetailsDao.update(ruleDetail.getId(), ruleDetail); } @@ -597,7 +596,7 @@ public class NsxElement extends AdapterBase implements DhcpServiceProvider, Dns if (ruleDetail == null || (ruleDetail != null && ruleDetail.getValue().equalsIgnoreCase("true"))) { boolean pfRuleResult = nsxService.deletePortForwardRule(networkRule); if (pfRuleResult && ruleDetail != null) { - logger.debug("Updating firewall rule detail {} for rule {}, set to false", ruleDetail.getId(), rule.getId()); + logger.debug("Updating firewall rule detail {} ({}) for rule {}, set to false", ruleDetail.getId(), ruleDetail.getName(), rule); ruleDetail.setValue("false"); firewallRuleDetailsDao.update(ruleDetail.getId(), ruleDetail); } diff --git 
a/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxGuestNetworkGuru.java b/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxGuestNetworkGuru.java index 032967d4061..0ab622c1808 100644 --- a/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxGuestNetworkGuru.java +++ b/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxGuestNetworkGuru.java @@ -218,7 +218,7 @@ public class NsxGuestNetworkGuru extends GuestNetworkGuru implements NetworkMigr } VpcVO vpc = _vpcDao.findById(network.getVpcId()); if (Objects.isNull(vpc)) { - String msg = String.format("Unable to find VPC with id: %s, allocating for network %s", network.getVpcId(), network.getName()); + String msg = String.format("Unable to find VPC with id: %s, allocating for network %s", network.getVpcId(), network); logger.debug(msg); } @@ -239,12 +239,12 @@ public class NsxGuestNetworkGuru extends GuestNetworkGuru implements NetworkMigr PublicIpAddress ipAddress = networkModel.getSourceNatIpAddressForGuestNetwork(account, network); String translatedIp = ipAddress.getAddress().addr(); String tier1GatewayName = NsxControllerUtils.getTier1GatewayName(domainId, accountId, dataCenterId, resourceId, false); - logger.debug(String.format("Creating NSX NAT Rule for Tier1 GW %s for translated IP %s for Isolated network %s", tier1GatewayName, translatedIp, network.getName())); + logger.debug("Creating NSX NAT Rule for Tier1 GW {} for translated IP {} for Isolated network {}", tier1GatewayName, translatedIp, network); String natRuleId = NsxControllerUtils.getNsxNatRuleId(domainId, accountId, dataCenterId, resourceId, false); CreateOrUpdateNsxTier1NatRuleCommand cmd = NsxHelper.createOrUpdateNsxNatRuleCommand(domainId, accountId, dataCenterId, tier1GatewayName, "SNAT", translatedIp, natRuleId); NsxAnswer nsxAnswer = nsxControllerUtils.sendNsxCommand(cmd, dataCenterId); if (!nsxAnswer.getResult()) { - String msg = 
String.format("Could not create NSX NAT Rule on Tier1 Gateway %s for IP %s for Isolated network %s", tier1GatewayName, translatedIp, network.getName()); + String msg = String.format("Could not create NSX NAT Rule on Tier1 Gateway %s for IP %s for Isolated network %s", tier1GatewayName, translatedIp, network); logger.error(msg); throw new CloudRuntimeException(msg); } @@ -256,7 +256,7 @@ public class NsxGuestNetworkGuru extends GuestNetworkGuru implements NetworkMigr CreateNsxDhcpRelayConfigCommand command = NsxHelper.createNsxDhcpRelayConfigCommand(domain, account, zone, vpc, network, addresses); NsxAnswer answer = nsxControllerUtils.sendNsxCommand(command, zone.getId()); if (!answer.getResult()) { - String msg = String.format("Error creating DHCP relay config for network %s and nic %s: %s", network.getName(), nic.getName(), answer.getDetails()); + String msg = String.format("Error creating DHCP relay config for network %s and nic %s: %s", network, nic, answer.getDetails()); logger.error(msg); throw new CloudRuntimeException(msg); } @@ -319,7 +319,7 @@ public class NsxGuestNetworkGuru extends GuestNetworkGuru implements NetworkMigr } vpcName = vpc.getName(); } else { - logger.debug(String.format("Creating a Tier 1 Gateway for the network %s before creating the NSX segment", networkVO.getName())); + logger.debug("Creating a Tier 1 Gateway for the network {} before creating the NSX segment", networkVO); long networkOfferingId = networkVO.getNetworkOfferingId(); NetworkOfferingVO networkOfferingVO = networkOfferingDao.findById(networkOfferingId); boolean isSourceNatSupported = !NetworkOffering.NetworkMode.ROUTED.equals(networkOfferingVO.getNetworkMode()) && @@ -328,7 +328,7 @@ public class NsxGuestNetworkGuru extends GuestNetworkGuru implements NetworkMigr NsxAnswer nsxAnswer = nsxControllerUtils.sendNsxCommand(nsxTier1GatewayCommand, zone.getId()); if (!nsxAnswer.getResult()) { - String msg = String.format("Could not create a Tier 1 Gateway for network %s: %s", 
networkVO.getName(), nsxAnswer.getDetails()); + String msg = String.format("Could not create a Tier 1 Gateway for network %s: %s", networkVO, nsxAnswer.getDetails()); logger.error(msg); throw new CloudRuntimeException(msg); } diff --git a/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxPublicNetworkGuru.java b/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxPublicNetworkGuru.java index 4df71056601..c7931b46c82 100644 --- a/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxPublicNetworkGuru.java +++ b/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxPublicNetworkGuru.java @@ -116,7 +116,7 @@ public class NsxPublicNetworkGuru extends PublicNetworkGuru { // For NSX, use VR Public IP != Source NAT List ips = _ipAddressDao.listByAssociatedVpc(vpc.getId(), true); if (CollectionUtils.isEmpty(ips)) { - String err = String.format("Cannot find a source NAT IP for the VPC %s", vpc.getName()); + String err = String.format("Cannot find a source NAT IP for the VPC %s", vpc); logger.error(err); throw new CloudRuntimeException(err); } @@ -136,10 +136,10 @@ public class NsxPublicNetworkGuru extends PublicNetworkGuru { boolean sourceNatEnabled = !NetworkOffering.NetworkMode.ROUTED.equals(vpcVO.getNetworkMode()) && vpcOfferingServiceMapDao.areServicesSupportedByVpcOffering(vpc.getVpcOfferingId(), services); - logger.info(String.format("Creating Tier 1 Gateway for VPC %s", vpc.getName())); + logger.info("Creating Tier 1 Gateway for VPC {}", vpc); boolean result = nsxService.createVpcNetwork(dataCenterId, accountId, domainId, resourceId, vpc.getName(), sourceNatEnabled); if (!result) { - String msg = String.format("Error creating Tier 1 Gateway for VPC %s", vpc.getName()); + String msg = String.format("Error creating Tier 1 Gateway for VPC %s", vpc); logger.error(msg); throw new CloudRuntimeException(msg); } diff --git 
a/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxServiceImpl.java b/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxServiceImpl.java index 139d8a55e59..64a2514fc51 100644 --- a/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxServiceImpl.java +++ b/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxServiceImpl.java @@ -73,13 +73,13 @@ public class NsxServiceImpl implements NsxService, Configurable { long zoneId = vpc.getZoneId(); long vpcId = vpc.getId(); - logger.debug(String.format("Updating the source NAT IP for NSX VPC %s to IP: %s", vpc.getName(), address.getAddress().addr())); + logger.debug("Updating the source NAT IP for NSX VPC {} to IP: {}", vpc, address.getAddress().addr()); String tier1GatewayName = NsxControllerUtils.getTier1GatewayName(domainId, accountId, zoneId, vpcId, true); String sourceNatRuleId = NsxControllerUtils.getNsxNatRuleId(domainId, accountId, zoneId, vpcId, true); CreateOrUpdateNsxTier1NatRuleCommand cmd = NsxHelper.createOrUpdateNsxNatRuleCommand(domainId, accountId, zoneId, tier1GatewayName, "SNAT", address.getAddress().addr(), sourceNatRuleId); NsxAnswer answer = nsxControllerUtils.sendNsxCommand(cmd, zoneId); if (!answer.getResult()) { - logger.error(String.format("Could not update the source NAT IP address for VPC %s: %s", vpc.getName(), answer.getDetails())); + logger.error("Could not update the source NAT IP address for VPC {}: {}", vpc, answer.getDetails()); return false; } return true; @@ -109,7 +109,7 @@ public class NsxServiceImpl implements NsxService, Configurable { network.getVpcId(), vpcName, network.getId(), network.getName()); NsxAnswer result = nsxControllerUtils.sendNsxCommand(deleteNsxSegmentCommand, network.getDataCenterId()); if (!result.getResult()) { - String msg = String.format("Could not remove the NSX segment for network %s: %s", network.getName(), result.getDetails()); + String msg = 
String.format("Could not remove the NSX segment for network %s: %s", network, result.getDetails()); logger.error(msg); throw new CloudRuntimeException(msg); } diff --git a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightGuestNetworkGuru.java b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightGuestNetworkGuru.java index 7b4851fc285..42aebf0182a 100644 --- a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightGuestNetworkGuru.java +++ b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightGuestNetworkGuru.java @@ -107,10 +107,10 @@ public class OpendaylightGuestNetworkGuru extends GuestNetworkGuru { List devices = openDaylightControllerMappingDao.listByPhysicalNetwork(physnet.getId()); if (devices.isEmpty()) { - logger.error("No Controller on physical network " + physnet.getName()); + logger.error("No Controller on physical network {}", physnet); return null; } - logger.debug("Controller " + devices.get(0).getUuid() + " found on physical network " + physnet.getId()); + logger.debug("Controller {} found on physical network {}", devices.get(0).getUuid(), physnet); logger.debug("Physical isolation type is ODL, asking GuestNetworkGuru to design this network"); NetworkVO networkObject = (NetworkVO)super.design(offering, plan, userSpecified, name, vpcId, owner); @@ -194,7 +194,7 @@ public class OpendaylightGuestNetworkGuru extends GuestNetworkGuru { AddHypervisorCommand addCmd = new AddHypervisorCommand(dest.getHost().getUuid(), dest.getHost().getPrivateIpAddress()); AddHypervisorAnswer addAnswer = (AddHypervisorAnswer)agentManager.easySend(controller.getHostId(), addCmd); if (addAnswer == null || !addAnswer.getResult()) { - logger.error("Failed to add " + dest.getHost().getName() + " as a node to the controller"); + logger.error(String.format("Failed 
to add %s as a node to the controller", dest.getHost())); throw new InsufficientVirtualNetworkCapacityException("Failed to add destination hypervisor to the OpenDaylight Controller", dest.getPod().getId()); } @@ -241,7 +241,7 @@ public class OpendaylightGuestNetworkGuru extends GuestNetworkGuru { public void shutdown(NetworkProfile profile, NetworkOffering offering) { NetworkVO networkObject = networkDao.findById(profile.getId()); if (networkObject.getBroadcastDomainType() != BroadcastDomainType.OpenDaylight || networkObject.getBroadcastUri() == null) { - logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); + logger.warn(String.format("BroadcastUri is empty or incorrect for guest network %s", networkObject)); return; } diff --git a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/agent/OpenDaylightControllerResourceManagerImpl.java b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/agent/OpenDaylightControllerResourceManagerImpl.java index 8bf68f0c289..c4b3d68de18 100644 --- a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/agent/OpenDaylightControllerResourceManagerImpl.java +++ b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/agent/OpenDaylightControllerResourceManagerImpl.java @@ -106,11 +106,11 @@ public class OpenDaylightControllerResourceManagerImpl implements OpenDaylightCo final PhysicalNetworkServiceProviderVO ntwkSvcProvider = physicalNetworkServiceProviderDao.findByServiceProvider(physicalNetwork.getId(), networkDevice.getNetworkServiceProvder()); if (ntwkSvcProvider == null) { - throw new CloudRuntimeException("Network Service Provider: " + networkDevice.getNetworkServiceProvder() + " is not enabled in the physical network: " - + physicalNetworkId + "to add this device"); + throw new 
CloudRuntimeException(String.format("Network Service Provider: %s is not enabled in the physical network: %s to add this device", + networkDevice.getNetworkServiceProvder(), physicalNetwork)); } else if (ntwkSvcProvider.getState() == PhysicalNetworkServiceProvider.State.Shutdown) { - throw new CloudRuntimeException("Network Service Provider: " + ntwkSvcProvider.getProviderName() + " is in shutdown state in the physical network: " - + physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is in shutdown state in the physical network: %s to add this device", + ntwkSvcProvider.getProviderName(), physicalNetwork)); } final Map hostParams = new HashMap(); diff --git a/plugins/network-elements/ovs/src/main/java/com/cloud/network/element/OvsElement.java b/plugins/network-elements/ovs/src/main/java/com/cloud/network/element/OvsElement.java index 69891954264..b8f4e0c73ff 100644 --- a/plugins/network-elements/ovs/src/main/java/com/cloud/network/element/OvsElement.java +++ b/plugins/network-elements/ovs/src/main/java/com/cloud/network/element/OvsElement.java @@ -114,22 +114,19 @@ StaticNatServiceProvider, IpDeployer { } protected boolean canHandle(final Network network, final Service service) { - logger.debug("Checking if OvsElement can handle service " - + service.getName() + " on network " + network.getDisplayText()); + logger.debug(String.format("Checking if OvsElement can handle service %s on network %s", service.getName(), network)); if (network.getBroadcastDomainType() != BroadcastDomainType.Vswitch) { return false; } if (!_networkModel.isProviderForNetwork(getProvider(), network.getId())) { - logger.debug("OvsElement is not a provider for network " - + network.getDisplayText()); + logger.debug(String.format("OvsElement is not a provider for network %s", network)); return false; } if (!_ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), service, Network.Provider.Ovs)) { - 
logger.debug("OvsElement can't provide the " + service.getName() - + " service on network " + network.getDisplayText()); + logger.debug(String.format("OvsElement can't provide the %s service on network %s", service.getName(), network)); return false; } @@ -149,9 +146,7 @@ StaticNatServiceProvider, IpDeployer { final DeployDestination dest, final ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - logger.debug("entering OvsElement implement function for network " - + network.getDisplayText() + " (state " + network.getState() - + ")"); + logger.debug(String.format("entering OvsElement implement function for network %s (state %s)", network, network.getState())); if (!canHandle(network, Service.Connectivity)) { return false; @@ -437,9 +432,8 @@ StaticNatServiceProvider, IpDeployer { final List routers = _routerDao.listByNetworkAndRole( network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug("Virtual router element doesn't need to associate ip addresses on the backend; virtual " - + "router doesn't exist in the network " - + network.getId()); + logger.debug(String.format("Virtual router element doesn't need to associate ip" + + " addresses on the backend; virtual router doesn't exist in the network %s", network)); return true; } @@ -462,8 +456,8 @@ StaticNatServiceProvider, IpDeployer { final List routers = _routerDao.listByNetworkAndRole( network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug("Ovs element doesn't need to apply static nat on the backend; virtual " - + "router doesn't exist in the network " + network.getId()); + logger.debug(String.format("Ovs element doesn't need to apply static nat on the " + + "backend; virtual router doesn't exist in the network %s", network)); return true; } @@ -485,8 +479,8 @@ StaticNatServiceProvider, IpDeployer { final List routers = _routerDao.listByNetworkAndRole( 
network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug("Ovs element doesn't need to apply firewall rules on the backend; virtual " - + "router doesn't exist in the network " + network.getId()); + logger.debug(String.format("Ovs element doesn't need to apply firewall rules on the" + + " backend; virtual router doesn't exist in the network %s", network)); return true; } @@ -511,9 +505,8 @@ StaticNatServiceProvider, IpDeployer { final List routers = _routerDao.listByNetworkAndRole( network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug("Virtual router elemnt doesn't need to apply load balancing rules on the backend; virtual " - + "router doesn't exist in the network " - + network.getId()); + logger.debug(String.format("Virtual router element doesn't need to apply load " + + "balancing rules on the backend; virtual router doesn't exist in the network %s", network)); return true; } @@ -523,7 +516,7 @@ StaticNatServiceProvider, IpDeployer { for (final DomainRouterVO domainRouterVO : routers) { result = result && networkTopology.applyLoadBalancingRules(network, rules, domainRouterVO); if (!result) { - logger.debug("Failed to apply load balancing rules in network " + network.getId()); + logger.debug(String.format("Failed to apply load balancing rules in network %s", network)); } } } @@ -603,18 +596,13 @@ StaticNatServiceProvider, IpDeployer { } if (expire != null && !containsOnlyNumbers(expire, timeEndChar)) { - throw new InvalidParameterValueException( - "Failed LB in validation rule id: " + rule.getId() - + " Cause: expire is not in timeformat: " - + expire); + throw new InvalidParameterValueException(String.format("Failed LB in validation rule id: %s Cause: expire is not in time format: %s", + rule.getUuid(), expire)); } if (tablesize != null && !containsOnlyNumbers(tablesize, "kmg")) { - throw new InvalidParameterValueException( - "Failed LB in validation rule id: " - + rule.getId() - 
+ " Cause: tablesize is not in size format: " - + tablesize); + throw new InvalidParameterValueException(String.format("Failed LB in validation rule id: %s Cause: table size is not in size format: %s", + rule.getUuid(), tablesize)); } } else if (StickinessMethodType.AppCookieBased.getName() @@ -634,18 +622,14 @@ StaticNatServiceProvider, IpDeployer { } if (length != null && !containsOnlyNumbers(length, null)) { - throw new InvalidParameterValueException( - "Failed LB in validation rule id: " + rule.getId() - + " Cause: length is not a number: " - + length); + throw new InvalidParameterValueException(String.format("Failed LB in validation rule id: %s Cause: length is not a number: %s", + rule.getUuid(), length)); } if (holdTime != null && !containsOnlyNumbers(holdTime, timeEndChar) && !containsOnlyNumbers( holdTime, null)) { - throw new InvalidParameterValueException( - "Failed LB in validation rule id: " + rule.getId() - + " Cause: holdtime is not in timeformat: " - + holdTime); + throw new InvalidParameterValueException(String.format("Failed LB in validation rule id: %s Cause: holdtime is not in time format: %s", + rule.getUuid(), holdTime)); } } } diff --git a/plugins/network-elements/ovs/src/main/java/com/cloud/network/guru/OvsGuestNetworkGuru.java b/plugins/network-elements/ovs/src/main/java/com/cloud/network/guru/OvsGuestNetworkGuru.java index 97531a91537..0a9eeea496f 100644 --- a/plugins/network-elements/ovs/src/main/java/com/cloud/network/guru/OvsGuestNetworkGuru.java +++ b/plugins/network-elements/ovs/src/main/java/com/cloud/network/guru/OvsGuestNetworkGuru.java @@ -187,13 +187,12 @@ public class OvsGuestNetworkGuru extends GuestNetworkGuru { NetworkVO networkObject = _networkDao.findById(profile.getId()); if (networkObject.getBroadcastDomainType() != BroadcastDomainType.Vswitch || networkObject.getBroadcastUri() == null) { - logger.warn("BroadcastUri is empty or incorrect for guestnetwork " - + networkObject.getDisplayText()); + 
logger.warn(String.format("BroadcastUri is empty or incorrect for guest network %s", networkObject)); return; } if (profile.getBroadcastDomainType() == BroadcastDomainType.Vswitch ) { - logger.debug("Releasing vnet for the network id=" + profile.getId()); + logger.debug(String.format("Releasing vnet for the network %s", profile)); _dcDao.releaseVnet(BroadcastDomainType.getValue(profile.getBroadcastUri()), profile.getDataCenterId(), profile.getPhysicalNetworkId(), profile.getAccountId(), profile.getReservationId()); } diff --git a/plugins/network-elements/ovs/src/main/java/com/cloud/network/ovs/OvsTunnelManagerImpl.java b/plugins/network-elements/ovs/src/main/java/com/cloud/network/ovs/OvsTunnelManagerImpl.java index c99a6fd5de3..804f29c01b1 100644 --- a/plugins/network-elements/ovs/src/main/java/com/cloud/network/ovs/OvsTunnelManagerImpl.java +++ b/plugins/network-elements/ovs/src/main/java/com/cloud/network/ovs/OvsTunnelManagerImpl.java @@ -265,9 +265,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage //for network with label on target host Commands fetchIfaceCmds = new Commands(new OvsFetchInterfaceCommand(physNetLabel)); - logger.debug("Ask host " + host.getId() + - " to retrieve interface for phy net with label:" + - physNetLabel); + logger.debug(String.format("Ask host %s to retrieve interface for phy net with label: %s", host, physNetLabel)); Answer[] fetchIfaceAnswers = _agentMgr.send(host.getId(), fetchIfaceCmds); //And finally save it for future use endpointIp = handleFetchInterfaceAnswer(fetchIfaceAnswers, host.getId()); @@ -318,7 +316,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage OvsTunnelNetworkVO ta = _tunnelNetworkDao.findByFromToNetwork(hostId, rh.longValue(), nw.getId()); // Try and create the tunnel even if a previous attempt failed if (ta == null || ta.getState().equals(OvsTunnel.State.Failed.name())) { - logger.debug("Attempting to create tunnel from:" + hostId + " to:" + 
rh.longValue()); + logger.debug(String.format("Attempting to create tunnel from: %s to: %d", host, rh)); if (ta == null) { createTunnelRecord(hostId, rh.longValue(), nw.getId(), key); } @@ -331,8 +329,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage hostId, nw.getId()); // Try and create the tunnel even if a previous attempt failed if (ta == null || ta.getState().equals(OvsTunnel.State.Failed.name())) { - logger.debug("Attempting to create tunnel from:" + - rh.longValue() + " to:" + hostId); + logger.debug(String.format("Attempting to create tunnel from: %d to: %s", rh, host)); if (ta == null) { createTunnelRecord(rh.longValue(), hostId, nw.getId(), key); @@ -346,22 +343,19 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage try { String myIp = getGreEndpointIP(host, nw); if (myIp == null) - throw new GreTunnelException("Unable to retrieve the source " + "endpoint for the GRE tunnel." + "Failure is on host:" + host.getId()); + throw new GreTunnelException(String.format("Unable to retrieve the source endpoint for the GRE tunnel. Failure is on host: %s", host)); boolean noHost = true; for (Long i : toHostIds) { HostVO rHost = _hostDao.findById(i); String otherIp = getGreEndpointIP(rHost, nw); if (otherIp == null) throw new GreTunnelException( - "Unable to retrieve the remote " - + "endpoint for the GRE tunnel." - + "Failure is on host:" + rHost.getId()); + String.format("Unable to retrieve the remote endpoint for the GRE tunnel. 
Failure is on host: %s", rHost)); Commands cmds = new Commands( new OvsCreateTunnelCommand(otherIp, key, Long.valueOf(hostId), i, nw.getId(), myIp, bridgeName, nw.getUuid())); - logger.debug("Attempting to create tunnel from:" + hostId + " to:" + i + " for the network " + nw.getId()); - logger.debug("Ask host " + hostId - + " to create gre tunnel to " + i); + logger.debug(String.format("Attempting to create tunnel from %s to %s for the network %s", host, rHost, nw)); + logger.debug(String.format("Ask host %s to create gre tunnel to %s", host, rHost)); Answer[] answers = _agentMgr.send(hostId, cmds); handleCreateTunnelAnswer(answers); noHost = false; @@ -372,8 +366,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage String otherIp = getGreEndpointIP(rHost, nw); Commands cmds = new Commands(new OvsCreateTunnelCommand(myIp, key, i, Long.valueOf(hostId), nw.getId(), otherIp, bridgeName, nw.getUuid())); - logger.debug("Ask host " + i + " to create gre tunnel to " - + hostId); + logger.debug(String.format("Ask host %s to create gre tunnel to %s", rHost, host)); Answer[] answers = _agentMgr.send(i, cmds); handleCreateTunnelAnswer(answers); noHost = false; @@ -383,7 +376,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage // anyway. 
This will ensure VIF rules will be triggered if (noHost) { Commands cmds = new Commands(new OvsSetupBridgeCommand(bridgeName, hostId, nw.getId())); - logger.debug("Ask host " + hostId + " to configure bridge for network:" + nw.getId()); + logger.debug(String.format("Ask host %s to configure bridge for network:%s", host, nw)); Answer[] answers = _agentMgr.send(hostId, cmds); handleSetupBridgeAnswer(answers); } @@ -451,7 +444,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage _tunnelNetworkDao.releaseFromLockTable(lock.getId()); logger.debug(String.format("Destroy bridge for" + - "network %1$s successful", networkId)); + "network %1$s successful", lock)); } else { logger.debug(String.format("Destroy bridge for" + "network %1$s failed", networkId)); @@ -487,8 +480,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage if (p.getState().equals(OvsTunnel.State.Established.name())) { Command cmd= new OvsDestroyTunnelCommand(p.getNetworkId(), bridgeName, p.getPortName()); - logger.debug("Destroying tunnel to " + host.getId() + - " from " + p.getFrom()); + logger.debug(String.format("Destroying tunnel to %s from %d", host, p.getFrom())); Answer ans = _agentMgr.send(p.getFrom(), cmd); handleDestroyTunnelAnswer(ans, p.getFrom(), p.getTo(), p.getNetworkId()); } @@ -497,7 +489,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage Command cmd = new OvsDestroyBridgeCommand(nw.getId(), generateBridgeNameForVpc(nw.getVpcId()), host.getId()); - logger.debug("Destroying bridge for network " + nw.getId() + " on host:" + host.getId()); + logger.debug(String.format("Destroying bridge for network %s on host: %s", nw, host)); Answer ans = _agentMgr.send(host.getId(), cmd); handleDestroyBridgeAnswer(ans, host.getId(), nw.getId()); } catch (Exception e) { @@ -515,7 +507,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage int key = getGreKey(nw); String 
bridgeName = generateBridgeName(nw, key); Command cmd = new OvsDestroyBridgeCommand(nw.getId(), bridgeName, host.getId()); - logger.debug("Destroying bridge for network " + nw.getId() + " on host:" + host.getId()); + logger.debug(String.format("Destroying bridge for network %s on host: %s", nw, host)); Answer ans = _agentMgr.send(host.getId(), cmd); handleDestroyBridgeAnswer(ans, host.getId(), nw.getId()); @@ -528,8 +520,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage if (p.getState().equals(OvsTunnel.State.Established.name())) { cmd = new OvsDestroyTunnelCommand(p.getNetworkId(), bridgeName, p.getPortName()); - logger.debug("Destroying tunnel to " + host.getId() + - " from " + p.getFrom()); + logger.debug(String.format("Destroying tunnel to %s from %d", host, p.getFrom())); ans = _agentMgr.send(p.getFrom(), cmd); handleDestroyTunnelAnswer(ans, p.getFrom(), p.getTo(), p.getNetworkId()); @@ -565,8 +556,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage // since this is the first VM from the VPC being launched on the host, first setup the bridge try { Commands cmds = new Commands(new OvsSetupBridgeCommand(bridgeName, hostId, null)); - logger.debug("Ask host " + hostId + " to create bridge for vpc " + vpcId + " and configure the " - + " bridge for distributed routing."); + logger.debug(String.format("Ask host %s to create bridge for vpc %d and configure the bridge for distributed routing.", host, vpcId)); Answer[] answers = _agentMgr.send(hostId, cmds); handleSetupBridgeAnswer(answers); } catch (OperationTimedoutException | AgentUnavailableException e) { @@ -578,8 +568,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage cmd.setSequenceNumber(getNextRoutingPolicyUpdateSequenceNumber(vpcId)); if (!sendVpcRoutingPolicyChangeUpdate(cmd, hostId, bridgeName)) { - logger.debug("Failed to send VPC routing policy change update to host : " + hostId + - ". 
But moving on with sending the updates to the rest of the hosts."); + logger.debug(String.format("Failed to send VPC routing policy change update to host: %s. But moving on with sending the updates to the rest of the hosts.", host)); } } @@ -602,7 +591,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage tunnelRecord = _tunnelNetworkDao.findByFromToNetwork(hostId, rh.longValue(), vpcNetwork.getId()); // Try and create the tunnel if does not exit or previous attempt failed if (tunnelRecord == null || tunnelRecord.getState().equals(OvsTunnel.State.Failed.name())) { - logger.debug("Attempting to create tunnel from:" + hostId + " to:" + rh.longValue()); + logger.debug(String.format("Attempting to create tunnel from: %s to: %d", host, rh)); if (tunnelRecord == null) { createTunnelRecord(hostId, rh.longValue(), vpcNetwork.getId(), key); } @@ -613,7 +602,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage tunnelRecord = _tunnelNetworkDao.findByFromToNetwork(rh.longValue(), hostId, vpcNetwork.getId()); // Try and create the tunnel if does not exit or previous attempt failed if (tunnelRecord == null || tunnelRecord.getState().equals(OvsTunnel.State.Failed.name())) { - logger.debug("Attempting to create tunnel from:" + rh.longValue() + " to:" + hostId); + logger.debug(String.format("Attempting to create tunnel from: %d to: %s", rh, host)); if (tunnelRecord == null) { createTunnelRecord(rh.longValue(), hostId, vpcNetwork.getId(), key); } @@ -626,23 +615,18 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage try { String myIp = getGreEndpointIP(host, vpcNetwork); if (myIp == null) - throw new GreTunnelException("Unable to retrieve the source " + "endpoint for the GRE tunnel." 
- + "Failure is on host:" + host.getId()); + throw new GreTunnelException(String.format("Unable to retrieve the source endpoint for the GRE tunnel.Failure is on host: %s", host)); boolean noHost = true; for (Long i : toHostIds) { HostVO rHost = _hostDao.findById(i); String otherIp = getGreEndpointIP(rHost, vpcNetwork); if (otherIp == null) - throw new GreTunnelException( - "Unable to retrieve the remote endpoint for the GRE tunnel." - + "Failure is on host:" + rHost.getId()); + throw new GreTunnelException(String.format("Unable to retrieve the remote endpoint for the GRE tunnel. Failure is on host: %s", rHost)); Commands cmds = new Commands( new OvsCreateTunnelCommand(otherIp, key, Long.valueOf(hostId), i, vpcNetwork.getId(), myIp, bridgeName, vpcNetwork.getUuid())); - logger.debug("Attempting to create tunnel from:" + hostId + " to:" + i + " for the network " - + vpcNetwork.getId()); - logger.debug("Ask host " + hostId - + " to create gre tunnel to " + i); + logger.debug(String.format("Attempting to create tunnel from: %s to: %s for the network %s", host, rHost, vpcNetwork)); + logger.debug(String.format("Ask host %s to create gre tunnel to %s", host, rHost)); Answer[] answers = _agentMgr.send(hostId, cmds); handleCreateTunnelAnswer(answers); } @@ -653,8 +637,7 @@ public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManage Commands cmds = new Commands(new OvsCreateTunnelCommand(myIp, key, i, Long.valueOf(hostId), vpcNetwork.getId(), otherIp, bridgeName, vpcNetwork.getUuid())); - logger.debug("Ask host " + i + " to create gre tunnel to " - + hostId); + logger.debug(String.format("Ask host %s to create gre tunnel to %s", rHost, host)); Answer[] answers = _agentMgr.send(i, cmds); handleCreateTunnelAnswer(answers); } diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ConfigTungstenFabricServiceCmd.java 
b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ConfigTungstenFabricServiceCmd.java index 19bf0a33909..d4ee924858e 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ConfigTungstenFabricServiceCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ConfigTungstenFabricServiceCmd.java @@ -150,7 +150,7 @@ public class ConfigTungstenFabricServiceCmd extends BaseCmd { if (networkServiceMapDao.canProviderSupportServiceInNetwork(network.getId(), service, provider)) { logger.debug(String.format("A mapping between the network, service and provider (%s, %s, %s) " + "already exists, skipping duplicated entry", - network.getId(), service.getName(), provider.getName())); + network, service.getName(), provider.getName())); return; } diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenElement.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenElement.java index 106cf5180c3..1f4f98194dc 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenElement.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenElement.java @@ -271,11 +271,10 @@ public class TungstenElement extends AdapterBase } protected boolean canHandle(Network network, Network.Service service) { - logger.debug("Checking if TungstenElement can handle service " + service.getName() + " on network " - + network.getDisplayText()); + logger.debug(String.format("Checking if TungstenElement can handle service %s on network %s", service.getName(), network)); if (!networkModel.isProviderForNetwork(getProvider(), network.getId())) { - logger.debug("TungstenElement is not a provider for network " + 
network.getDisplayText()); + logger.debug(String.format("TungstenElement is not a provider for network %s", network)); return false; } @@ -661,8 +660,7 @@ public class TungstenElement extends AdapterBase TungstenUtils.getPublicNetworkPolicyName(ipAddressVO.getId()), null, network.getUuid()); tungstenFabricUtils.sendTungstenCommand(deleteTungstenNetworkPolicyCommand, network.getDataCenterId()); } catch (IllegalArgumentException e) { - throw new CloudRuntimeException( - "Failing to expunge the vm from Tungsten-Fabric with the uuid " + vm.getUuid()); + throw new CloudRuntimeException(String.format("Failing to expunge the vm %s from Tungsten-Fabric", vm)); } } @@ -680,8 +678,7 @@ public class TungstenElement extends AdapterBase TungstenCommand deleteVmCmd = new DeleteTungstenVmCommand(vm.getUuid()); tungstenFabricUtils.sendTungstenCommand(deleteVmCmd, network.getDataCenterId()); } catch (IllegalArgumentException e) { - throw new CloudRuntimeException( - "Failing to expunge the vm from Tungsten-Fabric with the uuid " + vm.getUuid()); + throw new CloudRuntimeException(String.format("Failing to expunge the vm %s from Tungsten-Fabric", vm)); } } } diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenGuestNetworkGuru.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenGuestNetworkGuru.java index 4d22806a139..38eb6c34da2 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenGuestNetworkGuru.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenGuestNetworkGuru.java @@ -196,7 +196,7 @@ public class TungstenGuestNetworkGuru extends GuestNetworkGuru implements Networ DeleteTungstenVmCommand cmd = new DeleteTungstenVmCommand(vm.getUuid()); tungstenFabricUtils.sendTungstenCommand(cmd, config.getDataCenterId()); } catch 
(IllegalArgumentException e) { - throw new CloudRuntimeException("Failing to expunge the vm from Tungsten-Fabric with the uuid " + vm.getUuid()); + throw new CloudRuntimeException(String.format("Failing to expunge the vm %s from Tungsten-Fabric", vm)); } } } @@ -290,7 +290,7 @@ public class TungstenGuestNetworkGuru extends GuestNetworkGuru implements Networ } } } catch (Exception ex) { - throw new CloudRuntimeException("unable to create Tungsten-Fabric network " + network.getUuid()); + throw new CloudRuntimeException(String.format("unable to create Tungsten-Fabric network %s", network)); } return implemented; } diff --git a/plugins/network-elements/vxlan/src/main/java/com/cloud/network/guru/VxlanGuestNetworkGuru.java b/plugins/network-elements/vxlan/src/main/java/com/cloud/network/guru/VxlanGuestNetworkGuru.java index fc92775c697..ec654af2b32 100644 --- a/plugins/network-elements/vxlan/src/main/java/com/cloud/network/guru/VxlanGuestNetworkGuru.java +++ b/plugins/network-elements/vxlan/src/main/java/com/cloud/network/guru/VxlanGuestNetworkGuru.java @@ -149,7 +149,7 @@ public class VxlanGuestNetworkGuru extends GuestNetworkGuru { public void shutdown(NetworkProfile profile, NetworkOffering offering) { NetworkVO networkObject = _networkDao.findById(profile.getId()); if (networkObject.getBroadcastDomainType() != BroadcastDomainType.Vxlan || networkObject.getBroadcastUri() == null) { - logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); + logger.warn(String.format("BroadcastUri is empty or incorrect for guest network %s", networkObject)); return; } diff --git a/plugins/storage/object/ceph/src/main/java/org/apache/cloudstack/storage/datastore/driver/CephObjectStoreDriverImpl.java b/plugins/storage/object/ceph/src/main/java/org/apache/cloudstack/storage/datastore/driver/CephObjectStoreDriverImpl.java index b2e1d23917b..551d96eab9a 100644 --- 
a/plugins/storage/object/ceph/src/main/java/org/apache/cloudstack/storage/datastore/driver/CephObjectStoreDriverImpl.java +++ b/plugins/storage/object/ceph/src/main/java/org/apache/cloudstack/storage/datastore/driver/CephObjectStoreDriverImpl.java @@ -215,7 +215,7 @@ public class CephObjectStoreDriverImpl extends BaseObjectStoreDriverImpl { RgwAdmin rgwAdmin = getRgwAdminClient(storeId); String username = account.getUuid(); - logger.debug("Attempting to create Ceph RGW user for account " + account.getAccountName() + " with UUID " + username); + logger.debug("Attempting to create Ceph RGW user for account {} with UUID {}", account, username); try { Optional user = rgwAdmin.getUserInfo(username); if (user.isPresent()) { diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java index 329de5a398a..e573f453a6c 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java @@ -210,8 +210,8 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive AsyncCompletionCallback callback) { CreateCmdResult result = null; try { - logger.info("Volume creation starting for data store [" + dataStore.getName() + - "] and data object [" + dataObject.getUuid() + "] of type [" + dataObject.getType() + "]"); + logger.info("Volume creation starting for data store [{}] and data object [{}] of type [{}]", + dataStore, dataObject, dataObject.getType()); // quota size of the cloudbyte volume will be increased with the given // HypervisorSnapshotReserve @@ -243,7 +243,7 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive if 
(DataObjectType.TEMPLATE.equals(dataObject.getType())) { volume = api.getVolume(context, dataIn); if (volume != null) { - logger.info("Template volume already exists [" + dataObject.getUuid() + "]"); + logger.info("Template volume already exists [{}]", dataObject); } } @@ -261,7 +261,7 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive throw e; } } - logger.info("New volume created on remote storage for [" + dataObject.getUuid() + "]"); + logger.info("New volume created on remote storage for [{}]", dataObject); } // set these from the discovered or created volume before proceeding @@ -273,9 +273,9 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive result = new CreateCmdResult(dataObject.getUuid(), new Answer(null)); result.setSuccess(true); - logger.info("Volume creation complete for [" + dataObject.getUuid() + "]"); + logger.info("Volume creation complete for [{}]", dataObject); } catch (Throwable e) { - logger.error("Volume creation failed for dataObject [" + dataObject.getUuid() + "]: " + e.toString(), e); + logger.error("Volume creation failed for dataObject [{}]: {}", dataObject, e.toString(), e); result = new CreateCmdResult(null, new Answer(null)); result.setResult(e.toString()); result.setSuccess(false); @@ -318,7 +318,7 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive AsyncCompletionCallback callback) { CopyCommandResult result = null; try { - logger.info("Copying volume " + srcdata.getUuid() + " to " + destdata.getUuid() + "]"); + logger.info("Copying volume {} to {}]", srcdata, destdata); if (!canCopy(srcdata, destdata)) { throw new CloudRuntimeException( @@ -330,7 +330,7 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive Map details = _storagePoolDao.getDetails(storagePool.getId()); ProviderAdapter api = getAPI(storagePool, details); - logger.info("Copy volume " + srcdata.getUuid() + " to " + destdata.getUuid()); + 
logger.info("Copy volume {} to {}", srcdata, destdata); ProviderVolume outVolume; ProviderAdapterContext context = newManagedVolumeContext(destdata); @@ -347,14 +347,14 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive // if we copied from one volume to another, the target volume's disk offering or user input may be of a larger size // we won't, however, shrink a volume if its smaller. if (outVolume.getAllocatedSizeInBytes() < destdata.getSize()) { - logger.info("Resizing volume " + destdata.getUuid() + " to requested target volume size of " + destdata.getSize()); + logger.info("Resizing volume {} to requested target volume size of {}", destdata, destdata.getSize()); api.resize(context, destIn, destdata.getSize()); } // initial volume info does not have connection map yet. That is added when grantAccess is called later. String finalPath = generatePathInfo(outVolume, null); persistVolumeData(storagePool, details, destdata, outVolume, null); - logger.info("Copy completed from [" + srcdata.getUuid() + "] to [" + destdata.getUuid() + "]"); + logger.info("Copy completed from [{}] to [{}]", srcdata, destdata); VolumeObjectTO voto = new VolumeObjectTO(); voto.setPath(finalPath); @@ -381,9 +381,8 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive @Override public boolean canCopy(DataObject srcData, DataObject destData) { - logger.debug("canCopy: Checking srcData [" + srcData.getUuid() + ":" + srcData.getType() + ":" - + srcData.getDataStore().getId() + " AND destData [" - + destData.getUuid() + ":" + destData.getType() + ":" + destData.getDataStore().getId() + "]"); + logger.debug("canCopy: Checking srcData [{}:{}:{} AND destData [{}:{}:{}]", + srcData, srcData.getType(), srcData.getDataStore(), destData, destData.getType(), destData.getDataStore()); try { if (!isSameProvider(srcData)) { logger.debug("canCopy: No we can't -- the source provider is NOT the correct type for this driver!"); @@ -458,12 +457,14 
@@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive ProviderAdapterContext context = newManagedVolumeContext(data); ProviderAdapterDataObject dataIn = newManagedDataObject(data, poolVO); - if (logger.isDebugEnabled()) logger.debug("Calling provider API to resize volume " + data.getUuid() + " to " + resizeParameter.newSize); + if (logger.isDebugEnabled()) + logger.debug("Calling provider API to resize volume {} to {}", data, resizeParameter.newSize); api.resize(context, dataIn, resizeParameter.newSize); if (vol.isAttachedVM()) { if (VirtualMachine.State.Running.equals(vol.getAttachedVM().getState())) { - if (logger.isDebugEnabled()) logger.debug("Notify currently attached VM of volume resize for " + data.getUuid() + " to " + resizeParameter.newSize); + if (logger.isDebugEnabled()) + logger.debug("Notify currently attached VM of volume resize for {} to {}", data, resizeParameter.newSize); _volumeService.resizeVolumeOnHypervisor(vol.getId(), resizeParameter.newSize, vol.getAttachedVM().getHostId(), vol.getAttachedVM().getInstanceName()); } } @@ -484,7 +485,7 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive } public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) { - logger.debug("Granting host " + host.getName() + " access to volume " + dataObject.getUuid()); + logger.debug("Granting host {} access to volume {}", host, dataObject); try { StoragePoolVO storagePool = _storagePoolDao.findById(dataObject.getDataStore().getId()); @@ -502,10 +503,10 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap); - logger.info("Granted host " + host.getName() + " access to volume " + dataObject.getUuid()); + logger.info("Granted host {} access to volume {}", host, dataObject); return true; } catch (Throwable e) { - String msg = "Error granting host " + host.getName() + " access to 
volume " + dataObject.getUuid() + ":" + e.getMessage(); + String msg = String.format("Error granting host %s access to volume %s: %s", host, dataObject, e.getMessage()); logger.error(msg); throw new CloudRuntimeException(msg, e); } @@ -517,7 +518,7 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive return; } - logger.debug("Revoking access for host " + host.getName() + " to volume " + dataObject.getUuid()); + logger.debug("Revoking access for host {} to volume {}", host, dataObject); try { StoragePoolVO storagePool = _storagePoolDao.findById(dataObject.getDataStore().getId()); @@ -535,9 +536,9 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive Map connIdMap = api.getConnectionIdMap(dataIn); persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap); - logger.info("Revoked access for host " + host.getName() + " to volume " + dataObject.getUuid()); + logger.info("Revoked access for host {} to volume {}", host, dataObject); } catch (Throwable e) { - String msg = "Error revoking access for host " + host.getName() + " to volume " + dataObject.getUuid() + ":" + e.getMessage(); + String msg = String.format("Error revoking access for host %s to volume %s: %s", host, dataObject, e.getMessage()); logger.error(msg); throw new CloudRuntimeException(msg, e); } @@ -546,8 +547,8 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive @Override public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, QualityOfServiceState qualityOfServiceState) { - logger.info("handleQualityOfServiceVolumeMigration: " + volumeInfo.getUuid() + " " + - volumeInfo.getPath() + ": " + qualityOfServiceState.toString()); + logger.info("handleQualityOfServiceVolumeMigration: {} path: {}: {}", + volumeInfo, volumeInfo.getPath(), qualityOfServiceState.toString()); } @Override diff --git 
a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java index f6ace68e2e8..771f79887e0 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java @@ -262,7 +262,8 @@ public class AdaptiveDataStoreLifeCycleImpl extends BasePrimaryDataStoreLifeCycl */ @Override public boolean attachCluster(DataStore store, ClusterScope scope) { - logger.info("Attaching storage pool [" + store.getName() + "] to cluster [" + scope.getScopeId() + "]"); + ClusterVO cluster = _clusterDao.findById(scope.getScopeId()); + logger.info("Attaching storage pool {} to cluster {}", store, cluster); _dataStoreHelper.attachCluster(store); StoragePoolVO dataStoreVO = _storagePoolDao.findById(store.getId()); @@ -272,29 +273,29 @@ public class AdaptiveDataStoreLifeCycleImpl extends BasePrimaryDataStoreLifeCycl List allHosts = _resourceMgr.listAllUpHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId()); if (allHosts.isEmpty()) { _primaryDataStoreDao.expunge(primarystore.getId()); - throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primarystore.getClusterId()); + throw new CloudRuntimeException(String.format("No host up to associate a storage pool with in cluster %s", cluster)); } if (dataStoreVO.isManaged()) { //boolean success = false; - for (HostVO h : allHosts) { - logger.debug("adding host " + h.getName() + " to storage pool " + store.getName()); + for (HostVO host : allHosts) { + logger.debug("adding host {} to storage pool {}", host, store); } } logger.debug("In createPool 
Adding the pool to each of the hosts"); List poolHosts = new ArrayList(); - for (HostVO h : allHosts) { + for (HostVO host : allHosts) { try { - _storageMgr.connectHostToSharedPool(h.getId(), primarystore.getId()); - poolHosts.add(h); + _storageMgr.connectHostToSharedPool(host, primarystore.getId()); + poolHosts.add(host); } catch (Exception e) { - logger.warn("Unable to establish a connection between " + h + " and " + primarystore, e); + logger.warn("Unable to establish a connection between {} and {}", host, primarystore, e); } } if (poolHosts.isEmpty()) { - logger.warn("No host can access storage pool " + primarystore + " on cluster " + primarystore.getClusterId()); + logger.warn("No host can access storage pool {} on cluster {}", primarystore, cluster); _primaryDataStoreDao.expunge(primarystore.getId()); throw new CloudRuntimeException("Failed to access storage pool"); } @@ -304,19 +305,19 @@ public class AdaptiveDataStoreLifeCycleImpl extends BasePrimaryDataStoreLifeCycl @Override public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) { - logger.info("Attaching storage pool [" + store.getName() + "] to host [" + scope.getScopeId() + "]"); + logger.info("Attaching storage pool {} to host {}", store::toString, () -> hostDao.findById(scope.getScopeId())); _dataStoreHelper.attachHost(store, scope, existingInfo); return true; } @Override public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { - logger.info("Attaching storage pool [" + dataStore.getName() + "] to zone [" + scope.getScopeId() + "]"); + logger.info("Attaching storage pool {} to zone {}", dataStore, zoneDao.findById(scope.getScopeId())); List hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId()); List poolHosts = new ArrayList(); for (HostVO host : hosts) { try { - _storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + _storageMgr.connectHostToSharedPool(host, 
dataStore.getId()); poolHosts.add(host); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); @@ -336,7 +337,7 @@ public class AdaptiveDataStoreLifeCycleImpl extends BasePrimaryDataStoreLifeCycl */ @Override public boolean maintain(DataStore store) { - logger.info("Placing storage pool [" + store.getName() + "] in maintainence mode"); + logger.info("Placing storage pool {} in maintenance mode", store); if (_storagePoolAutomation.maintain(store)) { return _dataStoreHelper.maintain(store); } else { @@ -349,7 +350,7 @@ public class AdaptiveDataStoreLifeCycleImpl extends BasePrimaryDataStoreLifeCycl */ @Override public boolean cancelMaintain(DataStore store) { - logger.info("Canceling storage pool maintainence for [" + store.getName() + "]"); + logger.info("Canceling storage pool maintenance for {}", store); if (_dataStoreHelper.cancelMaintain(store)) { return _storagePoolAutomation.cancelMaintain(store); } else { @@ -362,7 +363,7 @@ public class AdaptiveDataStoreLifeCycleImpl extends BasePrimaryDataStoreLifeCycl */ @Override public boolean deleteDataStore(DataStore store) { - logger.info("Delete datastore called for [" + store.getName() + "]"); + logger.info("Delete datastore called for {}", store); return _dataStoreHelper.deletePrimaryDataStore(store); } @@ -371,7 +372,7 @@ public class AdaptiveDataStoreLifeCycleImpl extends BasePrimaryDataStoreLifeCycl */ @Override public boolean migrateToObjectStore(DataStore store) { - logger.info("Migrate datastore called for [" + store.getName() + "]. This is not currently implemented for this provider at this time"); + logger.info("Migrate datastore called for {}. 
This is not currently implemented for this provider at this time", store); return false; } @@ -388,7 +389,7 @@ public class AdaptiveDataStoreLifeCycleImpl extends BasePrimaryDataStoreLifeCycl */ @Override public void enableStoragePool(DataStore store) { - logger.info("Enabling storage pool [" + store.getName() + "]"); + logger.info("Enabling storage pool {}", store); _dataStoreHelper.enable(store); } @@ -397,7 +398,7 @@ public class AdaptiveDataStoreLifeCycleImpl extends BasePrimaryDataStoreLifeCycl */ @Override public void disableStoragePool(DataStore store) { - logger.info("Disabling storage pool [" + store.getName() + "]"); + logger.info("Disabling storage pool {}", store); _dataStoreHelper.disable(store); } } diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java index a0c8ee722a0..346649d669b 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java @@ -18,11 +18,16 @@ package org.apache.cloudstack.storage.datastore.provider; import javax.inject.Inject; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.storage.StoragePool; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; -import com.cloud.exception.StorageConflictException; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.dao.StoragePoolHostDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ 
-32,6 +37,12 @@ public class AdaptivePrimaryHostListener implements HypervisorHostListener { @Inject StoragePoolHostDao storagePoolHostDao; + @Inject + HostDao hostDao; + + @Inject + PrimaryDataStoreDao primaryDataStoreDao; + public AdaptivePrimaryHostListener(AdaptivePrimaryDatastoreAdapterFactoryMap factoryMap) { } @@ -49,11 +60,18 @@ public class AdaptivePrimaryHostListener implements HypervisorHostListener { } @Override - public boolean hostConnect(long hostId, long poolId) throws StorageConflictException { - logger.debug("hostConnect called for hostid [" + hostId + "], poolId [" + poolId + "]"); - StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId); + public boolean hostConnect(long hostId, long poolId) { + HostVO host = hostDao.findById(hostId); + StoragePoolVO pool = primaryDataStoreDao.findById(poolId); + return hostConnect(host, pool); + } + + @Override + public boolean hostConnect(Host host, StoragePool pool) { + logger.debug("hostConnect called for host {}, pool {}", host, pool); + StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(pool.getId(), host.getId()); if (storagePoolHost == null) { - storagePoolHost = new StoragePoolHostVO(poolId, hostId, ""); + storagePoolHost = new StoragePoolHostVO(pool.getId(), host.getId(), ""); storagePoolHostDao.persist(storagePoolHost); } else { return false; @@ -63,11 +81,18 @@ public class AdaptivePrimaryHostListener implements HypervisorHostListener { @Override public boolean hostDisconnected(long hostId, long poolId) { - logger.debug("hostDisconnected called for hostid [" + hostId + "], poolId [" + poolId + "]"); - StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId); + HostVO host = hostDao.findById(hostId); + StoragePoolVO pool = primaryDataStoreDao.findById(poolId); + return hostDisconnected(host, pool); + } + + @Override + public boolean hostDisconnected(Host host, StoragePool pool){ + logger.debug("hostDisconnected called for 
host {}, pool {}", host, pool); + StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(pool.getId(), host.getId()); if (storagePoolHost != null) { - storagePoolHostDao.deleteStoragePoolHostDetails(hostId, poolId); + storagePoolHostDao.deleteStoragePoolHostDetails(host.getId(), pool.getId()); } return true; } diff --git a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/driver/ElastistorPrimaryDataStoreDriver.java b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/driver/ElastistorPrimaryDataStoreDriver.java index 60359dd2c26..3d4afcaf95c 100644 --- a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/driver/ElastistorPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/driver/ElastistorPrimaryDataStoreDriver.java @@ -133,7 +133,7 @@ public class ElastistorPrimaryDataStoreDriver extends CloudStackPrimaryDataStore capacityIops = capacityIops - Iops; if (capacityIops < 0) { - throw new CloudRuntimeException("IOPS not available. [pool:" + storagePool.getName() + "] [availiops:" + capacityIops + "] [requirediops:" + Iops + "]"); + throw new CloudRuntimeException(String.format("IOPS not available. 
[pool:%s] [availiops:%d] [requirediops:%d]", storagePool, capacityIops, Iops)); } String protocoltype = null; diff --git a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java index 5d7f2d86e14..3ad08428e9d 100644 --- a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java @@ -365,8 +365,8 @@ public class ElastistorPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLif if (!dataStoreVO.isManaged()) { boolean success = false; - for (HostVO h : allHosts) { - success = createStoragePool(h.getId(), primarystore); + for (HostVO host : allHosts) { + success = createStoragePool(host, primarystore); if (success) { break; } @@ -377,7 +377,7 @@ public class ElastistorPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLif List poolHosts = new ArrayList(); for (HostVO h : allHosts) { try { - storageMgr.connectHostToSharedPool(h.getId(), primarystore.getId()); + storageMgr.connectHostToSharedPool(h, primarystore.getId()); poolHosts.add(h); } catch (Exception e) { logger.warn("Unable to establish a connection between " + h + " and " + primarystore, e); @@ -393,8 +393,8 @@ public class ElastistorPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLif return true; } - private boolean createStoragePool(long hostId, StoragePool pool) { - logger.debug("creating pool " + pool.getName() + " on host " + hostId); + private boolean createStoragePool(Host host, StoragePool pool) { + logger.debug(String.format("creating pool %s on host %s", pool, host)); if (pool.getPoolType() != StoragePoolType.NetworkFilesystem && pool.getPoolType() != 
StoragePoolType.Filesystem && pool.getPoolType() != StoragePoolType.IscsiLUN && pool.getPoolType() != StoragePoolType.Iscsi && pool.getPoolType() != StoragePoolType.VMFS && pool.getPoolType() != StoragePoolType.SharedMountPoint && pool.getPoolType() != StoragePoolType.PreSetup && pool.getPoolType() != StoragePoolType.OCFS2 && pool.getPoolType() != StoragePoolType.RBD @@ -403,17 +403,17 @@ public class ElastistorPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLif return false; } CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool); - final Answer answer = agentMgr.easySend(hostId, cmd); + final Answer answer = agentMgr.easySend(host.getId(), cmd); if (answer != null && answer.getResult()) { return true; } else { primaryDataStoreDao.expunge(pool.getId()); String msg = ""; if (answer != null) { - msg = "Can not create storage pool through host " + hostId + " due to " + answer.getDetails(); + msg = String.format("Can not create storage pool through host %s due to %s", host, answer.getDetails()); logger.warn(msg); } else { - msg = "Can not create storage pool through host " + hostId + " due to CreateStoragePoolCommand returns null"; + msg = String.format("Can not create storage pool through host %s due to CreateStoragePoolCommand returns null", host); logger.warn(msg); } throw new CloudRuntimeException(msg); @@ -433,7 +433,7 @@ public class ElastistorPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLif List poolHosts = new ArrayList(); for (HostVO host : hosts) { try { - storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + storageMgr.connectHostToSharedPool(host, dataStore.getId()); poolHosts.add(host); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); diff --git a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/provider/ElastistorHostListener.java 
b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/provider/ElastistorHostListener.java index d2307111a81..3946acc35d9 100644 --- a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/provider/ElastistorHostListener.java +++ b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/provider/ElastistorHostListener.java @@ -105,20 +105,20 @@ public class ElastistorHostListener implements HypervisorHostListener { final Answer answer = agentMgr.easySend(hostId, cmd); if (answer == null) { - throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command" + pool.getId()); + throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command for pool %s", poolVO)); } if (!answer.getResult()) { - String msg = "Unable to attach storage pool" + poolId + " to the host" + hostId; + String msg = String.format("Unable to attach storage pool %s to the host %s", poolVO, host); alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST,pool.getDataCenterId(), pool.getPodId(), msg, msg); - throw new CloudRuntimeException("Unable establish connection from storage head to storage pool " + pool.getId() + " due to " + answer.getDetails() + pool.getId()); + throw new CloudRuntimeException(String.format("Unable to establish connection from storage head to storage pool %s due to %s", poolVO, answer.getDetails())); } - assert (answer instanceof ModifyStoragePoolAnswer) : "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? Pool=" + pool.getId() + "Host=" + hostId; + assert (answer instanceof ModifyStoragePoolAnswer) : String.format("Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? 
Pool=%sHost=%s", poolVO, host); - logger.info("Connection established between " + pool + " host + " + hostId); + logger.info(String.format("Connection established between pool %s and host %s", pool, host)); return true; } diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java index 6423b07a909..dcf84525748 100644 --- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java @@ -388,7 +388,7 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver { } } catch (DateraObject.DateraError | UnsupportedEncodingException | InterruptedException dateraError) { - String errMesg = "Error revoking access for Volume : " + dataObject.getId(); + String errMesg = String.format("Error revoking access for Volume : %s", dataObject); logger.warn(errMesg, dateraError); throw new CloudRuntimeException(errMesg); } finally { @@ -588,7 +588,7 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver { usedSpaceBytes += DateraUtil.gibToBytes(appInstance.getSize()); } } catch (DateraObject.DateraError dateraError) { - String errMesg = "Error getting used bytes for storage pool : " + storagePool.getId(); + String errMesg = String.format("Error getting used bytes for storage pool : %s", storagePool); logger.warn(errMesg, dateraError); throw new CloudRuntimeException(errMesg); } @@ -723,7 +723,7 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver { storagePoolDao.update(storagePoolId, storagePool); } catch (UnsupportedEncodingException | DateraObject.DateraError e) { - String errMesg = "Error deleting app instance for 
Volume : " + volumeInfo.getId(); + String errMesg = String.format("Error deleting app instance for Volume: %s", volumeInfo); logger.warn(errMesg, e); throw new CloudRuntimeException(errMesg); } @@ -826,8 +826,7 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver { String iqnPath = DateraUtil.generateIqnPath(iqn); VolumeVO volumeVo = _volumeDao.findById(volumeInfo.getId()); - logger.debug("volume ID : " + volumeInfo.getId()); - logger.debug("volume uuid : " + volumeInfo.getUuid()); + logger.debug(String.format("volume: %s", volumeInfo)); volumeVo.set_iScsiName(iqnPath); volumeVo.setFolder(appInstance.getName()); @@ -970,7 +969,7 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver { if (baseAppInstanceName == null) { throw new CloudRuntimeException( - "Unable to find a base volume to clone " + volumeInfo.getId() + " type " + dataType); + "Unable to find a base volume to clone " + volumeInfo.getUuid() + " type " + dataType); } // Clone the app Instance @@ -995,7 +994,7 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver { } if (appInstance == null) { throw new CloudRuntimeException("Unable to create an app instance from snapshot or template " - + volumeInfo.getId() + " type " + dataType); + + volumeInfo.getUuid() + " type " + dataType); } logger.debug("Datera - Cloned " + baseAppInstanceName + " to " + clonedAppInstanceName); @@ -1114,7 +1113,7 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver { templateIops, replicaCount, volumePlacement, ipPool); if (appInstance == null) { - throw new CloudRuntimeException("Unable to create Template volume " + templateInfo.getId()); + throw new CloudRuntimeException(String.format("Unable to create Template volume %s", templateInfo.getUuid())); } iqn = appInstance.getIqn(); @@ -1306,9 +1305,8 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver { DateraObject.VolumeSnapshot volumeSnapshot = 
DateraUtil.takeVolumeSnapshot(conn, baseAppInstanceName); if (volumeSnapshot == null) { - logger.error("Unable to take native snapshot appInstance name:" + baseAppInstanceName - + " volume ID " + volumeInfo.getId()); - throw new CloudRuntimeException("Unable to take native snapshot for volume " + volumeInfo.getId()); + logger.error(String.format("Unable to take native snapshot appInstance name: %s volume: %s", baseAppInstanceName, volumeInfo)); + throw new CloudRuntimeException("Unable to take native snapshot for volume " + volumeInfo.getUuid()); } String snapshotName = baseAppInstanceName + ":" + volumeSnapshot.getTimestamp(); @@ -1358,7 +1356,7 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver { result.setResult(null); } catch (Exception ex) { - logger.debug("Failed to take CloudStack snapshot: " + snapshotInfo.getId(), ex); + logger.debug(String.format("Failed to take CloudStack snapshot: %s", snapshotInfo), ex); result = new CreateCmdResult(null, new CreateObjectAnswer(ex.toString())); @@ -1494,7 +1492,7 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver { storagePoolDao.update(storagePoolId, storagePool); } catch (Exception ex) { - logger.debug("Error in 'deleteSnapshot(SnapshotInfo, long)'. CloudStack snapshot ID: " + csSnapshotId, + logger.debug(String.format("Error in 'deleteSnapshot(SnapshotInfo, long)'. CloudStack snapshot: %s", snapshotInfo), ex); throw ex; } @@ -1531,7 +1529,7 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver { storagePoolDao.update(storagePoolId, storagePool); } catch (Exception ex) { - logger.debug("Failed to delete template volume. CloudStack template ID: " + templateInfo.getId(), ex); + logger.debug(String.format("Failed to delete template volume. 
CloudStack template: %s", templateInfo), ex); throw ex; } @@ -1553,8 +1551,7 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver { long storagePoolId = volumeVO.getPoolId(); long csSnapshotId = snapshotInfo.getId(); - logger.info("Datera - restoreVolumeSnapshot from snapshotId " + String.valueOf(csSnapshotId) + " to volume" - + volumeVO.getName()); + logger.info(String.format("Datera - restoreVolumeSnapshot from snapshot %s to volume %s", snapshotInfo, volumeVO)); DateraObject.AppInstance appInstance; @@ -1595,7 +1592,7 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver { callback.complete(commandResult); } catch (Exception ex) { - logger.debug("Error in 'revertSnapshot()'. CloudStack snapshot ID: " + csSnapshotId, ex); + logger.debug(String.format("Error in 'revertSnapshot()'. CloudStack snapshot: %s", snapshotInfo), ex); throw new CloudRuntimeException(ex.getMessage()); } diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java index 6d222b36b85..04ea3141423 100644 --- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java @@ -260,7 +260,7 @@ public class DateraPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCyc for (HostVO host : allHosts) { try { - _storageMgr.connectHostToSharedPool(host.getId(), primaryDataStoreInfo.getId()); + _storageMgr.connectHostToSharedPool(host, primaryDataStoreInfo.getId()); poolHosts.add(host); } catch (Exception e) { @@ -302,7 +302,7 @@ public class DateraPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCyc for (HostVO host : hosts) 
{ try { - _storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); } diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/provider/DateraHostListener.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/provider/DateraHostListener.java index 89ac2a9a21c..a0dc23da486 100644 --- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/provider/DateraHostListener.java +++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/provider/DateraHostListener.java @@ -86,7 +86,7 @@ public class DateraHostListener implements HypervisorHostListener { HostVO host = _hostDao.findById(hostId); if (host == null) { - logger.error("Failed to add host by HostListener as host was not found with id : " + hostId); + logger.error("Failed to add host by HostListener as host was not found with id : {}", hostId); return false; } StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(storagePoolId, hostId); @@ -280,9 +280,8 @@ public class DateraHostListener implements HypervisorHostListener { } if (!answer.getResult()) { - String msg = "Unable to modify targets on the following host: " + hostId; - HostVO host = _hostDao.findById(hostId); + String msg = String.format("Unable to modify targets on the following host: %s", host); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), msg, msg); @@ -294,21 +293,22 @@ public class DateraHostListener implements HypervisorHostListener { Answer answer = _agentMgr.easySend(hostId, cmd); if (answer == null) { - throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command (" + storagePool.getId() + ")"); + throw new 
CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command (%s)", storagePool)); } if (!answer.getResult()) { - String msg = "Unable to attach storage pool " + storagePool.getId() + " to host " + hostId; + String msg = String.format("Unable to attach storage pool %s to host %d", storagePool, hostId); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg); - throw new CloudRuntimeException("Unable to establish a connection from agent to storage pool " + storagePool.getId() + " due to " + answer.getDetails() + - " (" + storagePool.getId() + ")"); + throw new CloudRuntimeException(String.format( + "Unable to establish a connection from agent to storage pool %s due to %s", + storagePool, answer.getDetails())); } - assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer expected ; Pool = " + storagePool.getId() + " Host = " + hostId; + assert (answer instanceof ModifyStoragePoolAnswer) : String.format("ModifyStoragePoolAnswer expected ; Pool = %s Host = %d", storagePool, hostId); - logger.info("Connection established between storage pool " + storagePool + " and host + " + hostId); + logger.info("Connection established between storage pool {} and host {}", storagePool, hostId); } private List> getTargets(long clusterId, long storagePoolId) { diff --git a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java index 02a28b6e947..8bb9ef1ead8 100644 --- a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java +++ b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java @@ -256,7
+256,9 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri } } } catch (Exception ex) { - logger.debug("Unable to destroy volume" + data.getId(), ex); + logger.debug(String.format( + "Unable to destroy volume [id: %d, uuid: %s]", + data.getId(), data.getUuid()), ex); result.setResult(ex.toString()); } callback.complete(result); @@ -264,7 +266,7 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri @Override public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCallback callback) { - logger.debug(String.format("Copying volume %s(%s) to %s(%s)", srcdata.getId(), srcdata.getType(), destData.getId(), destData.getType())); + logger.debug("Copying volume [{}] to [{}]", srcdata, destData); boolean encryptionRequired = anyVolumeRequiresEncryption(srcdata, destData); DataStore store = destData.getDataStore(); if (store.getRole() == DataStoreRole.Primary) { @@ -381,7 +383,7 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri callback.complete(result); return; } catch (Exception e) { - logger.debug("Failed to take snapshot: " + snapshot.getId(), e); + logger.debug("Failed to take snapshot: {}", snapshot, e); result = new CreateCmdResult(null, null); result.setResult(e.toString()); } @@ -416,7 +418,7 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri } } } catch (Exception ex) { - logger.debug("Unable to revert snapshot " + snapshot.getId(), ex); + logger.debug("Unable to revert snapshot {}", snapshot, ex); result.setResult(ex.toString()); } callback.complete(result); @@ -476,7 +478,7 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri if (storagePoolVO != null) { volumeVO.setPoolId(storagePoolVO.getId()); } else { - logger.warn(String.format("Unable to find datastore %s while updating the new datastore of the volume %d", datastoreUUID, vol.getId())); + logger.warn("Unable to find datastore {} 
while updating the new datastore of the volume {}", datastoreUUID, vol); } } diff --git a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java index bc66e2ff136..771fcf09255 100644 --- a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java @@ -25,6 +25,9 @@ import com.cloud.agent.api.DeleteStoragePoolCommand; import com.cloud.agent.api.StoragePoolInfo; import com.cloud.agent.api.ValidateVcenterDetailsCommand; import com.cloud.alert.AlertManager; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.HostPodDao; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.StorageConflictException; import com.cloud.exception.StorageUnavailableException; @@ -85,6 +88,8 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor @Inject StorageManager storageMgr; + @Inject + ClusterDao clusterDao; @Inject VolumeDao volumeDao; @Inject @@ -94,6 +99,8 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor @Inject protected VirtualMachineManager vmMgr; @Inject + HostPodDao podDao; + @Inject protected SecondaryStorageVmDao _secStrgDao; @Inject UserVmDao userVmDao; @@ -102,6 +109,8 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor @Inject protected DomainRouterDao _domrDao; @Inject + DataCenterDao zoneDao; + @Inject protected StoragePoolHostDao _storagePoolHostDao; @Inject protected AlertManager _alertMgr; @@ -321,7 +330,8 @@ public class 
CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor List allHosts = _resourceMgr.listAllUpHosts(Host.Type.Routing, clusterId, podId, zoneId); if (allHosts.isEmpty()) { - throw new CloudRuntimeException("No host up to associate a storage pool with in zone: " + zoneId + " pod: " + podId + " cluster: " + clusterId); + throw new CloudRuntimeException(String.format("No host up to associate a storage pool with in zone: %s pod: %s cluster: %s", + zoneDao.findById(zoneId), podDao.findById(podId), clusterDao.findById(clusterId))); } boolean success = false; @@ -333,18 +343,20 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor return; } else { if (answer != null) { - throw new InvalidParameterValueException("Provided vCenter server details does not match with the existing vCenter in zone id: " + zoneId); + throw new InvalidParameterValueException(String.format("Provided vCenter server details does not match with the existing vCenter in zone: %s", + zoneDao.findById(zoneId))); } else { - String msg = "Can not validate vCenter through host " + h.getId() + " due to ValidateVcenterDetailsCommand returns null"; - logger.warn(msg); + logger.warn("Can not validate vCenter through host {} due to ValidateVcenterDetailsCommand returns null", h); } } } - throw new CloudRuntimeException("Could not validate vCenter details through any of the hosts with in zone: " + zoneId + ", pod: " + podId + ", cluster: " + clusterId); + throw new CloudRuntimeException(String.format("Could not validate vCenter details through any of the hosts with in zone: %s, pod: %s, cluster: %s", + zoneDao.findById(zoneId), podDao.findById(podId), clusterDao.findById(clusterId))); } - protected boolean createStoragePool(long hostId, StoragePool pool) { - logger.debug("creating pool " + pool.getName() + " on host " + hostId); + protected boolean createStoragePool(HostVO host, StoragePool pool) { + long hostId = host.getId(); + logger.debug("creating pool {} on host 
{}", pool, host); if (pool.getPoolType() != StoragePoolType.NetworkFilesystem && pool.getPoolType() != StoragePoolType.Filesystem && pool.getPoolType() != StoragePoolType.IscsiLUN && pool.getPoolType() != StoragePoolType.Iscsi && pool.getPoolType() != StoragePoolType.VMFS && @@ -363,10 +375,10 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor primaryDataStoreDao.expunge(pool.getId()); String msg = ""; if (answer != null) { - msg = "Can not create storage pool through host " + hostId + " due to " + answer.getDetails(); + msg = String.format("Can not create storage pool through host %s due to %s", host, answer.getDetails()); logger.warn(msg); } else { - msg = "Can not create storage pool through host " + hostId + " due to CreateStoragePoolCommand returns null"; + msg = String.format("Can not create storage pool through host %s due to CreateStoragePoolCommand returns null", host); logger.warn(msg); } throw new CloudRuntimeException(msg); @@ -381,18 +393,18 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor _resourceMgr.listAllUpHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId()); if (allHosts.isEmpty()) { primaryDataStoreDao.expunge(primarystore.getId()); - throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primarystore.getClusterId()); + throw new CloudRuntimeException(String.format("No host up to associate a storage pool with in cluster %s", clusterDao.findById(primarystore.getClusterId()))); } if (primarystore.getPoolType() == StoragePoolType.OCFS2 && !_ocfs2Mgr.prepareNodes(allHosts, primarystore)) { - logger.warn("Can not create storage pool " + primarystore + " on cluster " + primarystore.getClusterId()); + logger.warn("Can not create storage pool {} on cluster {}", primarystore::toString, () -> clusterDao.findById(primarystore.getClusterId())); primaryDataStoreDao.expunge(primarystore.getId()); 
return false; } boolean success = false; for (HostVO h : allHosts) { - success = createStoragePool(h.getId(), primarystore); + success = createStoragePool(h, primarystore); if (success) { break; } @@ -402,7 +414,7 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor List poolHosts = new ArrayList(); for (HostVO h : allHosts) { try { - storageMgr.connectHostToSharedPool(h.getId(), primarystore.getId()); + storageMgr.connectHostToSharedPool(h, primarystore.getId()); poolHosts.add(h); } catch (StorageConflictException se) { primaryDataStoreDao.expunge(primarystore.getId()); @@ -417,7 +429,7 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor } if (poolHosts.isEmpty()) { - logger.warn("No host can access storage pool " + primarystore + " on cluster " + primarystore.getClusterId()); + logger.warn("No host can access storage pool {} on cluster {}", primarystore::toString, () -> clusterDao.findById(primarystore.getClusterId())); primaryDataStoreDao.expunge(primarystore.getId()); throw new CloudRuntimeException("Failed to access storage pool"); } @@ -433,11 +445,11 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor List poolHosts = new ArrayList(); for (HostVO host : hosts) { try { - storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + storageMgr.connectHostToSharedPool(host, dataStore.getId()); poolHosts.add(host); } catch (StorageConflictException se) { primaryDataStoreDao.expunge(dataStore.getId()); - throw new CloudRuntimeException("Storage has already been added as local storage to host: " + host.getName()); + throw new CloudRuntimeException(String.format("Storage has already been added as local storage to host: %s", host)); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); String reason = storageMgr.getStoragePoolMountFailureReason(e.getMessage()); @@ -518,7 +530,7 @@ public class 
CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor DataStore dataStore = dataStoreHelper.attachHost(store, scope, existingInfo); if(existingInfo.getCapacityBytes() == 0){ try { - storageMgr.connectHostToSharedPool(scope.getScopeId(), dataStore.getId()); + storageMgr.connectHostToSharedPool(hostDao.findById(scope.getScopeId()), dataStore.getId()); } catch (StorageUnavailableException ex) { logger.error("Storage unavailable ",ex); } catch (StorageConflictException ex) { diff --git a/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java b/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java index 924c98b7912..4bab2f83712 100644 --- a/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java +++ b/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java @@ -143,14 +143,12 @@ public class CloudStackPrimaryDataStoreLifeCycleImplTest extends TestCase { when(_dataStoreMgr.getDataStore(anyLong(), eq(DataStoreRole.Primary))).thenReturn(store); when(store.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem); when(store.isShared()).thenReturn(true); - when(store.getName()).thenReturn("newPool"); when(store.getStorageProviderName()).thenReturn("default"); when(_dataStoreProviderMgr.getDataStoreProvider(anyString())).thenReturn(dataStoreProvider); when(dataStoreProvider.getName()).thenReturn("default"); - when(hostListener.hostConnect(Mockito.anyLong(), Mockito.anyLong())).thenReturn(true); storageMgr.registerHostListener("default", hostListener); @@ -179,7 +177,7 @@ public class CloudStackPrimaryDataStoreLifeCycleImplTest extends TestCase { CloudRuntimeException exception = new 
CloudRuntimeException(exceptionString); StorageManager storageManager = Mockito.mock(StorageManager.class); - Mockito.when(storageManager.connectHostToSharedPool(Mockito.anyLong(), Mockito.anyLong())).thenThrow(exception); + Mockito.when(storageManager.connectHostToSharedPool(Mockito.any(), Mockito.anyLong())).thenThrow(exception); Mockito.when(storageManager.getStoragePoolMountFailureReason(exceptionString)).thenReturn(mountFailureReason); ReflectionTestUtils.setField(_cloudStackPrimaryDataStoreLifeCycle, "storageMgr", storageManager); diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java index 42f7f82b24f..8abd8fd8bd2 100644 --- a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java +++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java @@ -382,7 +382,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver long vMaxIops = maxIops != null ? 
maxIops : 0; long newIops = vcIops + vMaxIops; capacityIops -= newIops; - logger.info("Current storagepool " + storagePool.getName() + " iops capacity: " + capacityIops); + logger.info(String.format("Current storagepool %s iops capacity: %d", storagePool, capacityIops)); storagePool.setCapacityIops(Math.max(0, capacityIops)); _storagePoolDao.update(storagePool.getId(), storagePool); } @@ -1198,7 +1198,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver @Override public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback callback) { - logger.debug("Linstor: takeSnapshot with snapshot: " + snapshotInfo.getUuid()); + logger.debug(String.format("Linstor: takeSnapshot with snapshot: %s", snapshotInfo.getSnapshotVO())); final VolumeInfo volumeInfo = snapshotInfo.getBaseVolume(); final VolumeVO volumeVO = _volumeDao.findById(volumeInfo.getId()); diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java index ac2563cd436..b45953989b5 100644 --- a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java @@ -173,22 +173,22 @@ public class LinstorPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStoreLi return dataStoreHelper.createPrimaryDataStore(parameters); } - protected boolean createStoragePool(long hostId, StoragePool pool) { - logger.debug("creating pool " + pool.getName() + " on host " + hostId); + protected boolean createStoragePool(Host host, StoragePool pool) { + logger.debug(String.format("creating pool %s on host %s", pool, host)); if (pool.getPoolType() != 
Storage.StoragePoolType.Linstor) { logger.warn(" Doesn't support storage pool type " + pool.getPoolType()); return false; } CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool); - final Answer answer = _agentMgr.easySend(hostId, cmd); + final Answer answer = _agentMgr.easySend(host.getId(), cmd); if (answer != null && answer.getResult()) { return true; } else { _primaryDataStoreDao.expunge(pool.getId()); String msg = answer != null ? - "Can not create storage pool through host " + hostId + " due to " + answer.getDetails() : - "Can not create storage pool through host " + hostId + " due to CreateStoragePoolCommand returns null"; + String.format("Can not create storage pool %s through host %s due to %s", pool, host, answer.getDetails()) : + String.format("Can not create storage pool %s through host %s due to CreateStoragePoolCommand returns null", pool, host); logger.warn(msg); throw new CloudRuntimeException(msg); } @@ -219,9 +219,9 @@ public class LinstorPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStoreLi List poolHosts = new ArrayList<>(); for (HostVO host : allHosts) { try { - createStoragePool(host.getId(), primaryDataStoreInfo); + createStoragePool(host, primaryDataStoreInfo); - _storageMgr.connectHostToSharedPool(host.getId(), primaryDataStoreInfo.getId()); + _storageMgr.connectHostToSharedPool(host, primaryDataStoreInfo.getId()); poolHosts.add(host); } catch (Exception e) { @@ -254,7 +254,7 @@ public class LinstorPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStoreLi for (HostVO host : hosts) { try { - _storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); } diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/provider/LinstorHostListener.java 
b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/provider/LinstorHostListener.java index 534431ed681..da458002f6d 100644 --- a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/provider/LinstorHostListener.java +++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/provider/LinstorHostListener.java @@ -18,6 +18,7 @@ package org.apache.cloudstack.storage.datastore.provider; import com.cloud.exception.StorageConflictException; import com.cloud.host.HostVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; public class LinstorHostListener extends DefaultHostListener { @Override @@ -27,6 +28,7 @@ public class LinstorHostListener extends DefaultHostListener { host.setParent(host.getName()); hostDao.update(host.getId(), host); } - return super.hostConnect(hostId, poolId); + StoragePoolVO pool = primaryStoreDao.findById(poolId); + return super.hostConnect(host, pool); } } diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/snapshot/LinstorVMSnapshotStrategy.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/snapshot/LinstorVMSnapshotStrategy.java index c7fe6d21190..4e4c882ae80 100644 --- a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/snapshot/LinstorVMSnapshotStrategy.java +++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/snapshot/LinstorVMSnapshotStrategy.java @@ -83,12 +83,12 @@ public class LinstorVMSnapshotStrategy extends DefaultVMSnapshotStrategy { Snapshot snap = new Snapshot(); snap.setName(vmSnapshotVO.getName()); snap.setResourceName(LinstorUtil.RSC_PREFIX + vol.getPath()); - log.debug(String.format("Add volume %s;%s to snapshot", vol.getName(), snap.getResourceName())); + log.debug("Add volume {};{} to snapshot", vol, snap.getResourceName()); cmsReq.addSnapshotsItem(snap); } - log.debug(String.format("Creating 
multi snapshot %s", vmSnapshotVO.getName())); + log.debug("Creating multi snapshot {}", vmSnapshotVO); ApiCallRcList answers = api.createMultiSnapshot(cmsReq); - log.debug(String.format("Created multi snapshot %s", vmSnapshotVO.getName())); + log.debug("Created multi snapshot {}", vmSnapshotVO); if (answers.hasError()) { throw new CloudRuntimeException( "Error creating vm snapshots: " + LinstorUtil.getBestErrorMessage(answers)); @@ -123,7 +123,7 @@ public class LinstorVMSnapshotStrategy extends DefaultVMSnapshotStrategy { @Override public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { - log.info("Take vm snapshot: " + vmSnapshot.getName()); + log.info("Take vm snapshot: {}", vmSnapshot); UserVm userVm = _userVmDao.findById(vmSnapshot.getVmId()); VMSnapshotVO vmSnapshotVO = (VMSnapshotVO) vmSnapshot; @@ -151,7 +151,7 @@ public class LinstorVMSnapshotStrategy extends DefaultVMSnapshotStrategy { linstorCreateMultiSnapshot(api, vmSnapshotVO, volumeTOs); - log.debug(String.format("finalize vm snapshot create for %s", vmSnapshotVO.getName())); + log.debug("finalize vm snapshot create for {}", vmSnapshotVO); finalizeCreate(vmSnapshotVO, volumeTOs); result = _vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationSucceeded); @@ -246,11 +246,11 @@ public class LinstorVMSnapshotStrategy extends DefaultVMSnapshotStrategy { if (err != null) { String errMsg = String.format("Unable to delete Linstor resource %s snapshot %s: %s", - rscName, snapshotName, err); + rscName, vmSnapshotVO, err); log.error(errMsg); failedToDelete.add(errMsg); } - log.info("Linstor: Deleted snapshot " + snapshotName + " for resource " + rscName); + log.info("Linstor: Deleted snapshot {} for resource {}", vmSnapshotVO, rscName); } if (!failedToDelete.isEmpty()) { @@ -306,7 +306,7 @@ public class LinstorVMSnapshotStrategy extends DefaultVMSnapshotStrategy { @Override public boolean revertVMSnapshot(VMSnapshot vmSnapshot) { - log.debug("Revert vm snapshot: " + 
vmSnapshot.getName()); + log.debug("Revert vm snapshot: {}", vmSnapshot); VMSnapshotVO vmSnapshotVO = (VMSnapshotVO) vmSnapshot; UserVmVO userVm = _userVmDao.findById(vmSnapshot.getVmId()); @@ -325,7 +325,7 @@ public class LinstorVMSnapshotStrategy extends DefaultVMSnapshotStrategy { result = revertVMSnapshotOperation(vmSnapshot, userVm.getId()); } catch (CloudRuntimeException | NoTransitionException e) { String errMsg = String.format( - "Error while finalize create vm snapshot [%s] due to %s", vmSnapshot.getName(), e.getMessage()); + "Error while finalize create vm snapshot [%s] due to %s", vmSnapshot, e.getMessage()); log.error(errMsg, e); throw new CloudRuntimeException(errMsg); } finally { diff --git a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/lifecylce/NexentaPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/lifecylce/NexentaPrimaryDataStoreLifeCycle.java index 62995196cac..79f771721f5 100644 --- a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/lifecylce/NexentaPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/lifecylce/NexentaPrimaryDataStoreLifeCycle.java @@ -141,7 +141,7 @@ public class NexentaPrimaryDataStoreLifeCycle for (HostVO host : hosts) { try { - _storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientConnectionPool.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientConnectionPool.java index a9dc8b42cd5..e605b159c99 100644 --- 
a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientConnectionPool.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientConnectionPool.java @@ -22,6 +22,8 @@ import java.security.KeyManagementException; import java.security.NoSuchAlgorithmException; import java.util.concurrent.ConcurrentHashMap; +import com.cloud.storage.StoragePool; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -49,9 +51,26 @@ public class ScaleIOGatewayClientConnectionPool { gatewayClients = new ConcurrentHashMap(); } - public ScaleIOGatewayClient getClient(Long storagePoolId, StoragePoolDetailsDao storagePoolDetailsDao) + public ScaleIOGatewayClient getClient(StoragePool storagePool, + StoragePoolDetailsDao storagePoolDetailsDao) throws NoSuchAlgorithmException, KeyManagementException, URISyntaxException { - Preconditions.checkArgument(storagePoolId != null && storagePoolId > 0, "Invalid storage pool id"); + return getClient(storagePool.getId(), storagePool.getUuid(), storagePoolDetailsDao); + } + + + public ScaleIOGatewayClient getClient(DataStore dataStore, + StoragePoolDetailsDao storagePoolDetailsDao) + throws NoSuchAlgorithmException, KeyManagementException, URISyntaxException { + return getClient(dataStore.getId(), dataStore.getUuid(), storagePoolDetailsDao); + } + + + private ScaleIOGatewayClient getClient(Long storagePoolId, String storagePoolUuid, + StoragePoolDetailsDao storagePoolDetailsDao) + throws NoSuchAlgorithmException, KeyManagementException, URISyntaxException { + + Preconditions.checkArgument(storagePoolId != null && storagePoolId > 0, + "Invalid storage pool id"); ScaleIOGatewayClient client = null; synchronized (gatewayClients) { @@ -67,23 +86,24 @@ public class 
ScaleIOGatewayClientConnectionPool { client = new ScaleIOGatewayClientImpl(url, username, password, false, clientTimeout, clientMaxConnections); gatewayClients.put(storagePoolId, client); - logger.debug("Added gateway client for the storage pool: " + storagePoolId); + logger.debug("Added gateway client for the storage pool [id: {}, uuid: {}]", storagePoolId, storagePoolUuid); } } return client; } - public boolean removeClient(Long storagePoolId) { - Preconditions.checkArgument(storagePoolId != null && storagePoolId > 0, "Invalid storage pool id"); + public boolean removeClient(DataStore dataStore) { + Preconditions.checkArgument(dataStore != null && dataStore.getId() > 0, + "Invalid storage pool id"); ScaleIOGatewayClient client = null; synchronized (gatewayClients) { - client = gatewayClients.remove(storagePoolId); + client = gatewayClients.remove(dataStore.getId()); } if (client != null) { - logger.debug("Removed gateway client for the storage pool: " + storagePoolId); + logger.debug("Removed gateway client for the storage pool: {}", dataStore); return true; } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java index 8044e787bd2..192ae4636e9 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java @@ -151,8 +151,12 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { sdcManager = new ScaleIOSDCManagerImpl(); } - public ScaleIOGatewayClient getScaleIOClient(final Long storagePoolId) throws Exception { - return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePoolId, storagePoolDetailsDao); + 
ScaleIOGatewayClient getScaleIOClient(final StoragePool storagePool) throws Exception { + return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePool, storagePoolDetailsDao); + } + + ScaleIOGatewayClient getScaleIOClient(final DataStore dataStore) throws Exception { + return ScaleIOGatewayClientConnectionPool.getInstance().getClient(dataStore, storagePoolDetailsDao); } private boolean setVolumeLimitsOnSDC(VolumeVO volume, Host host, DataStore dataStore, Long iopsLimit, Long bandwidthLimitInKbps) throws Exception { @@ -160,10 +164,10 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { final String sdcId = sdcManager.prepareSDC(host, dataStore); if (StringUtils.isBlank(sdcId)) { alertHostSdcDisconnection(host); - throw new CloudRuntimeException("Unable to grant access to volume: " + volume.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress()); + throw new CloudRuntimeException("Unable to grant access to volume: " + volume + ", no Sdc connected with host ip: " + host.getPrivateIpAddress()); } - final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(dataStore); return client.mapVolumeToSdcWithLimits(ScaleIOUtil.getVolumePath(volume.getPath()), sdcId, iopsLimit, bandwidthLimitInKbps); } @@ -197,22 +201,25 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { final String sdcId = sdcManager.prepareSDC(host, dataStore); if (StringUtils.isBlank(sdcId)) { alertHostSdcDisconnection(host); - throw new CloudRuntimeException(String.format("Unable to grant access to %s: %s, no Sdc connected with host ip: %s", dataObject.getType(), dataObject.getId(), host.getPrivateIpAddress())); + throw new CloudRuntimeException(String.format( + "Unable to grant access to %s: [id: %d, uuid: %s], no Sdc connected with host ip: %s", + dataObject.getType(), dataObject.getId(), + dataObject.getUuid(), host.getPrivateIpAddress())); 
} if (DataObjectType.VOLUME.equals(dataObject.getType())) { final VolumeVO volume = volumeDao.findById(dataObject.getId()); - logger.debug("Granting access for PowerFlex volume: " + volume.getPath()); + logger.debug("Granting access for PowerFlex volume: {} at path {}", volume, volume.getPath()); return setVolumeLimitsFromDetails(volume, host, dataStore); } else if (DataObjectType.TEMPLATE.equals(dataObject.getType())) { final VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(dataStore.getId(), dataObject.getId(), null); - logger.debug("Granting access for PowerFlex template volume: " + templatePoolRef.getInstallPath()); - final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + logger.debug("Granting access for PowerFlex template volume: {}", templatePoolRef.getInstallPath()); + final ScaleIOGatewayClient client = getScaleIOClient(dataStore); return client.mapVolumeToSdc(ScaleIOUtil.getVolumePath(templatePoolRef.getInstallPath()), sdcId); } else if (DataObjectType.SNAPSHOT.equals(dataObject.getType())) { SnapshotInfo snapshot = (SnapshotInfo) dataObject; - logger.debug("Granting access for PowerFlex volume snapshot: " + snapshot.getPath()); - final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + logger.debug("Granting access for PowerFlex volume snapshot: {} at path {}", snapshot, snapshot.getPath()); + final ScaleIOGatewayClient client = getScaleIOClient(dataStore); return client.mapVolumeToSdc(ScaleIOUtil.getVolumePath(snapshot.getPath()), sdcId); } @@ -235,23 +242,26 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { } try { - final String sdcId = getConnectedSdc(dataStore.getId(), host.getId()); + final String sdcId = getConnectedSdc(dataStore, host); if (StringUtils.isBlank(sdcId)) { - logger.warn(String.format("Unable to revoke access for %s: %s, no Sdc connected with host ip: %s", dataObject.getType(), dataObject.getId(), host.getPrivateIpAddress())); + 
logger.warn("Unable to revoke access for {}: [id: {}, uuid: {}], " + + "no Sdc connected with host [id: {}, uuid: {}, ip: {}]", + dataObject.getType(), dataObject.getId(), dataObject.getUuid(), + host.getId(), host.getUuid(), host.getPrivateIpAddress()); return; } - final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(dataStore); if (DataObjectType.VOLUME.equals(dataObject.getType())) { final VolumeVO volume = volumeDao.findById(dataObject.getId()); - logger.debug("Revoking access for PowerFlex volume: " + volume.getPath()); + logger.debug("Revoking access for PowerFlex volume: {} at path {}", volume, volume.getPath()); client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(volume.getPath()), sdcId); } else if (DataObjectType.TEMPLATE.equals(dataObject.getType())) { final VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(dataStore.getId(), dataObject.getId(), null); - logger.debug("Revoking access for PowerFlex template volume: " + templatePoolRef.getInstallPath()); + logger.debug("Revoking access for PowerFlex template volume: {}", templatePoolRef.getInstallPath()); client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(templatePoolRef.getInstallPath()), sdcId); } else if (DataObjectType.SNAPSHOT.equals(dataObject.getType())) { SnapshotInfo snapshot = (SnapshotInfo) dataObject; - logger.debug("Revoking access for PowerFlex volume snapshot: " + snapshot.getPath()); + logger.debug("Revoking access for PowerFlex volume snapshot: {} at path {}", snapshot, snapshot.getPath()); client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(snapshot.getPath()), sdcId); } if (client.listVolumesMappedToSdc(sdcId).isEmpty()) { @@ -272,13 +282,15 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { try { logger.debug("Revoking access for PowerFlex volume: " + volumePath); - final String sdcId = getConnectedSdc(dataStore.getId(), host.getId()); + final String 
sdcId = getConnectedSdc(dataStore, host); if (StringUtils.isBlank(sdcId)) { - logger.warn(String.format("Unable to revoke access for volume: %s, no Sdc connected with host ip: %s", volumePath, host.getPrivateIpAddress())); + logger.warn("Unable to revoke access for volume: {}, " + + "no Sdc connected with host [id: {}, uuid: {}, ip: {}]", + volumePath, host.getId(), host.getUuid(), host.getPrivateIpAddress()); return; } - final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(dataStore); client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(volumePath), sdcId); if (client.listVolumesMappedToSdc(sdcId).isEmpty()) { sdcManager = ComponentContext.inject(sdcManager); @@ -294,19 +306,20 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { revokeAccess(dataObject, host, dataStore); } - public String getConnectedSdc(long poolId, long hostId) { + public String getConnectedSdc(DataStore dataStore, Host host) { try { - StoragePoolHostVO poolHostVO = storagePoolHostDao.findByPoolHost(poolId, hostId); + StoragePoolHostVO poolHostVO = storagePoolHostDao.findByPoolHost(dataStore.getId(), host.getId()); if (poolHostVO == null) { return null; } - final ScaleIOGatewayClient client = getScaleIOClient(poolId); + final ScaleIOGatewayClient client = getScaleIOClient(dataStore); if (client.isSdcConnected(poolHostVO.getLocalPath())) { return poolHostVO.getLocalPath(); } } catch (Exception e) { - logger.warn("Couldn't check SDC connection for the host: " + hostId + " and storage pool: " + poolId + " due to " + e.getMessage(), e); + logger.warn(String.format("Couldn't check SDC connection for the host: %s and " + + "storage pool: %s due to %s", host, dataStore, e.getMessage()), e); } return null; @@ -424,7 +437,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { try { SnapshotObjectTO snapshotObjectTo = (SnapshotObjectTO)snapshotInfo.getTO(); - final 
ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + final ScaleIOGatewayClient client = getScaleIOClient(storagePool); final String scaleIOVolumeId = ScaleIOUtil.getVolumePath(volumeVO.getPath()); String snapshotName = String.format("%s-%s-%s-%s", ScaleIOUtil.SNAPSHOT_PREFIX, snapshotInfo.getId(), storagePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value()); @@ -441,8 +454,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { result = new CreateCmdResult(null, createObjectAnswer); result.setResult(null); } catch (Exception e) { - String errMsg = "Unable to take PowerFlex volume snapshot for volume: " + volumeInfo.getId() + " due to " + e.getMessage(); - logger.warn(errMsg); + logger.warn("Unable to take PowerFlex volume snapshot for volume: {} due to {}", volumeInfo, e.getMessage()); result = new CreateCmdResult(null, new CreateObjectAnswer(e.toString())); result.setResult(e.toString()); } @@ -470,8 +482,8 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { return; } - long storagePoolId = volumeVO.getPoolId(); - final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + StoragePoolVO storagePool = storagePoolDao.findById(volumeVO.getPoolId()); + final ScaleIOGatewayClient client = getScaleIOClient(storagePool); String snapshotVolumeId = ScaleIOUtil.getVolumePath(snapshot.getPath()); final String destVolumeId = ScaleIOUtil.getVolumePath(volumeVO.getPath()); client.revertSnapshot(snapshotVolumeId, destVolumeId); @@ -479,7 +491,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { CommandResult commandResult = new CommandResult(); callback.complete(commandResult); } catch (Exception ex) { - logger.debug("Unable to revert to PowerFlex snapshot: " + snapshot.getId(), ex); + logger.debug(String.format("Unable to revert to PowerFlex snapshot: %s", snapshot), ex); throw new CloudRuntimeException(ex.getMessage()); } } @@ 
-498,7 +510,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { Preconditions.checkArgument(storagePool != null && storagePool.getHostAddress() != null, "storagePool and host address should not be null"); try { - final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + final ScaleIOGatewayClient client = getScaleIOClient(storagePool); final String scaleIOStoragePoolId = storagePool.getPath(); final Long sizeInBytes = volumeInfo.getSize(); final long sizeInGb = (long) Math.ceil(sizeInBytes / (1024.0 * 1024.0 * 1024.0)); @@ -534,7 +546,8 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { // if volume needs to be set up with encryption, do it now if it's not a root disk (which gets done during template copy) if (anyVolumeRequiresEncryption(volumeInfo) && (!volumeInfo.getVolumeType().equals(Volume.Type.ROOT) || migrationInvolved)) { - logger.debug(String.format("Setting up encryption for volume %s", volumeInfo.getId())); + logger.debug("Setting up encryption for volume [id: {}, uuid: {}, name: {}]", + volumeInfo.getId(), volumeInfo.getUuid(), volumeInfo.getName()); VolumeObjectTO prepVolume = (VolumeObjectTO) createdObject.getTO(); prepVolume.setPath(volumePath); prepVolume.setUuid(volumePath); @@ -558,7 +571,8 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { } } } else { - logger.debug(String.format("No encryption configured for data volume %s", volumeInfo)); + logger.debug("No encryption configured for data volume [id: {}, uuid: {}, name: {}]", + volumeInfo.getId(), volumeInfo.getUuid(), volumeInfo.getName()); } return answer; @@ -578,7 +592,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { Preconditions.checkArgument(storagePool != null && storagePool.getHostAddress() != null, "storagePool and host address should not be null"); try { - final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + final 
ScaleIOGatewayClient client = getScaleIOClient(storagePool); final String scaleIOStoragePoolId = storagePool.getPath(); final Long sizeInBytes = templateInfo.getSize(); final long sizeInGb = (long) Math.ceil(sizeInBytes / (1024.0 * 1024.0 * 1024.0)); @@ -679,7 +693,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { try { String scaleIOVolumeId = ScaleIOUtil.getVolumePath(scaleIOVolumePath); - final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + final ScaleIOGatewayClient client = getScaleIOClient(storagePool); deleteResult = client.deleteVolume(scaleIOVolumeId); if (!deleteResult) { errMsg = "Failed to delete PowerFlex volume with id: " + scaleIOVolumeId; @@ -773,26 +787,26 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { * Data stores of file type happen automatically, but block device types have to handle it. Unfortunately for ScaleIO this means we add a whole 8GB to * the original size, but only if we are close to an 8GB boundary. */ - logger.debug(String.format("Copying template %s to volume %s", srcData.getId(), destData.getId())); + logger.debug("Copying template {} to volume {}", srcData, destData); VolumeInfo destInfo = (VolumeInfo) destData; boolean encryptionRequired = anyVolumeRequiresEncryption(destData); if (encryptionRequired) { if (needsExpansionForEncryptionHeader(srcData.getSize(), destData.getSize())) { long newSize = destData.getSize() + (1<<30); - logger.debug(String.format("Destination volume %s(%s) is configured for encryption. Resizing to fit headers, new size %s will be rounded up to nearest 8Gi", destInfo.getId(), destData.getSize(), newSize)); + logger.debug("Destination volume {} ({}) is configured for encryption. 
Resizing to fit headers, new size {} will be rounded up to nearest 8Gi", destInfo, destData.getSize(), newSize); ResizeVolumePayload p = new ResizeVolumePayload(newSize, destInfo.getMinIops(), destInfo.getMaxIops(), destInfo.getHypervisorSnapshotReserve(), false, destInfo.getAttachedVmName(), null, true); destInfo.addPayload(p); resizeVolume(destInfo); } else { - logger.debug(String.format("Template %s has size %s, ok for volume %s with size %s", srcData.getId(), srcData.getSize(), destData.getId(), destData.getSize())); + logger.debug("Template {} has size {}, ok for volume {} with size {}", srcData, srcData.getSize(), destData, destData.getSize()); } } else { - logger.debug(String.format("Destination volume is not configured for encryption, skipping encryption prep. Volume: %s", destData.getId())); + logger.debug("Destination volume is not configured for encryption, skipping encryption prep. Volume: {}", destData); } // Copy PowerFlex/ScaleIO template to volume - logger.debug(String.format("Initiating copy from PowerFlex template volume on host %s", destHost != null ? destHost.getId() : "")); + logger.debug("Initiating copy from PowerFlex template volume on host {}", destHost != null ? destHost : ""); int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); CopyCommand cmd = new CopyCommand(srcData.getTO(), destData.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); @@ -819,7 +833,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { protected Answer copyOfflineVolume(DataObject srcData, DataObject destData, Host destHost) { // Copy PowerFlex/ScaleIO volume - logger.debug(String.format("Initiating copy from PowerFlex template volume on host %s", destHost != null ? destHost.getId() : "")); + logger.debug("Initiating copy from PowerFlex template volume on host {}", destHost != null ? 
destHost : ""); String value = configDao.getValue(Config.CopyVolumeWait.key()); int copyVolumeWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.CopyVolumeWait.getDefaultValue())); @@ -861,7 +875,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { GetVolumeStatCommand statCmd = new GetVolumeStatCommand(srcVolumeInfo.getPath(), srcVolumeInfo.getStoragePoolType(), srcStore.getUuid()); GetVolumeStatAnswer statAnswer = (GetVolumeStatAnswer) ep.sendMessage(statCmd); if (!statAnswer.getResult() ) { - logger.warn(String.format("Unable to get volume %s stats", srcVolumeInfo.getId())); + logger.warn(String.format("Unable to get volume %s stats", srcVolumeInfo)); } else if (statAnswer.getVirtualSize() > 0) { srcVolumeUsableSize = statAnswer.getVirtualSize(); } @@ -882,15 +896,15 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { updateVolumeAfterCopyVolume(srcData, destData); updateSnapshotsAfterCopyVolume(srcData, destData); deleteSourceVolumeAfterSuccessfulBlockCopy(srcData, host); - logger.debug(String.format("Successfully migrated migrate PowerFlex volume %d to storage pool %d", srcVolumeId, destPoolId)); + logger.debug("Successfully migrated migrate PowerFlex volume {} to storage pool {}", srcData, destStore); answer = new Answer(null, true, null); } else { - String errorMsg = "Failed to migrate PowerFlex volume: " + srcVolumeId + " to storage pool " + destPoolId; + String errorMsg = String.format("Failed to migrate PowerFlex volume: %s to storage pool %s", srcData, destStore); logger.debug(errorMsg); answer = new Answer(null, false, errorMsg); } } catch (Exception e) { - logger.error("Failed to migrate PowerFlex volume: " + srcVolumeId + " due to: " + e.getMessage()); + logger.error("Failed to migrate PowerFlex volume: {} due to: {}", srcData, e.getMessage()); answer = new Answer(null, false, e.getMessage()); } @@ -941,8 +955,7 @@ public class ScaleIOPrimaryDataStoreDriver implements 
PrimaryDataStoreDriver { public void updateSnapshotsAfterCopyVolume(DataObject srcData, DataObject destData) throws Exception { final long srcVolumeId = srcData.getId(); DataStore srcStore = srcData.getDataStore(); - final long srcPoolId = srcStore.getId(); - final ScaleIOGatewayClient client = getScaleIOClient(srcPoolId); + final ScaleIOGatewayClient client = getScaleIOClient(srcStore); DataStore destStore = destData.getDataStore(); final long destPoolId = destStore.getId(); @@ -951,7 +964,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { List snapshots = snapshotDao.listByVolumeId(srcVolumeId); if (CollectionUtils.isNotEmpty(snapshots)) { for (SnapshotVO snapshot : snapshots) { - SnapshotDataStoreVO snapshotStore = snapshotDataStoreDao.findByStoreSnapshot(DataStoreRole.Primary, srcPoolId, snapshot.getId()); + SnapshotDataStoreVO snapshotStore = snapshotDataStoreDao.findByStoreSnapshot(DataStoreRole.Primary, srcStore.getId(), snapshot.getId()); if (snapshotStore == null) { continue; } @@ -979,7 +992,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { String errMsg; try { String scaleIOVolumeId = ScaleIOUtil.getVolumePath(srcVolumePath); - final ScaleIOGatewayClient client = getScaleIOClient(srcStore.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(srcStore); Boolean deleteResult = client.deleteVolume(scaleIOVolumeId); if (!deleteResult) { errMsg = "Failed to delete source PowerFlex volume with id: " + scaleIOVolumeId; @@ -1000,7 +1013,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { String errMsg; try { String scaleIOVolumeId = ScaleIOUtil.getVolumePath(destVolumePath); - final ScaleIOGatewayClient client = getScaleIOClient(destStore.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(destStore); Boolean deleteResult = client.deleteVolume(scaleIOVolumeId); if (!deleteResult) { errMsg = "Failed to delete PowerFlex volume with id: " + 
scaleIOVolumeId; @@ -1079,7 +1092,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { long srcPoolId = srcStore.getId(); long destPoolId = destStore.getId(); - final ScaleIOGatewayClient client = getScaleIOClient(srcPoolId); + final ScaleIOGatewayClient client = getScaleIOClient(srcStore); final String srcVolumePath = ((VolumeInfo) srcData).getPath(); final String srcVolumeId = ScaleIOUtil.getVolumePath(srcVolumePath); final StoragePoolVO destStoragePool = storagePoolDao.findById(destPoolId); @@ -1144,12 +1157,12 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { answer = new Answer(null, true, null); } else { - String errorMsg = "Failed to migrate PowerFlex volume: " + srcData.getId() + " to storage pool " + destPoolId; + String errorMsg = String.format("Failed to migrate PowerFlex volume: %s to storage pool %d", srcData, destPoolId); logger.debug(errorMsg); answer = new Answer(null, false, errorMsg); } } catch (Exception e) { - logger.error("Failed to migrate PowerFlex volume: " + srcData.getId() + " due to: " + e.getMessage()); + logger.error("Failed to migrate PowerFlex volume: {} due to: {}", srcData, e.getMessage()); answer = new Answer(null, false, e.getMessage()); } @@ -1206,7 +1219,8 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { try { String scaleIOVolumeId = ScaleIOUtil.getVolumePath(volumeInfo.getPath()); Long storagePoolId = volumeInfo.getPoolId(); - final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + final ScaleIOGatewayClient client = getScaleIOClient(storagePool); ResizeVolumePayload payload = (ResizeVolumePayload)volumeInfo.getpayload(); long newSizeInBytes = payload.newSize != null ? 
payload.newSize : volumeInfo.getSize(); @@ -1228,7 +1242,6 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { } } - StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); boolean attachedRunning = false; long hostId = 0; @@ -1315,7 +1328,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { storagePool.setUsedBytes(Math.min(usedBytes, capacityBytes)); storagePoolDao.update(storagePoolId, storagePool); } catch (Exception e) { - String errMsg = "Unable to resize PowerFlex volume: " + volumeInfo.getId() + " due to " + e.getMessage(); + String errMsg = "Unable to resize PowerFlex volume: " + volumeInfo + " due to " + e.getMessage(); logger.warn(errMsg); throw new CloudRuntimeException(errMsg, e); } @@ -1377,12 +1390,11 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { Map customStats = new HashMap<>(); try { - final ScaleIOGatewayClient client = getScaleIOClient(pool.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(pool); int connectedSdcsCount = client.getConnectedSdcsCount(); customStats.put(ScaleIOUtil.CONNECTED_SDC_COUNT_STAT, String.valueOf(connectedSdcsCount)); } catch (Exception e) { - String errMsg = "Unable to get custom storage stats for the pool: " + pool.getId() + " due to " + e.getMessage(); - logger.error(errMsg); + logger.error("Unable to get custom storage stats for the pool: {} due to {}", pool, e.getMessage()); } return customStats; @@ -1393,7 +1405,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { Preconditions.checkArgument(storagePool != null, "storagePool cannot be null"); try { - final ScaleIOGatewayClient client = getScaleIOClient(storagePool.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(storagePool); StoragePoolStatistics poolStatistics = client.getStoragePoolStatistics(storagePool.getPath()); if (poolStatistics != null && poolStatistics.getNetMaxCapacityInBytes() != null 
&& poolStatistics.getNetUsedCapacityInBytes() != null) { Long capacityBytes = poolStatistics.getNetMaxCapacityInBytes(); @@ -1401,7 +1413,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { return new Pair(capacityBytes, usedBytes); } } catch (Exception e) { - String errMsg = "Unable to get storage stats for the pool: " + storagePool.getId() + " due to " + e.getMessage(); + String errMsg = "Unable to get storage stats for the pool: " + storagePool + " due to " + e.getMessage(); logger.warn(errMsg); throw new CloudRuntimeException(errMsg, e); } @@ -1420,7 +1432,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { Preconditions.checkArgument(StringUtils.isNotEmpty(volumePath), "volumePath cannot be null"); try { - final ScaleIOGatewayClient client = getScaleIOClient(storagePool.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(storagePool); VolumeStatistics volumeStatistics = client.getVolumeStatistics(ScaleIOUtil.getVolumePath(volumePath)); if (volumeStatistics != null) { Long provisionedSizeInBytes = volumeStatistics.getNetProvisionedAddressesInBytes(); @@ -1428,7 +1440,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { return new Pair(provisionedSizeInBytes, allocatedSizeInBytes); } } catch (Exception e) { - String errMsg = "Unable to get stats for the volume: " + volumePath + " in the pool: " + storagePool.getId() + " due to " + e.getMessage(); + String errMsg = "Unable to get stats for the volume: " + volumePath + " in the pool: " + storagePool + " due to " + e.getMessage(); logger.warn(errMsg); throw new CloudRuntimeException(errMsg, e); } @@ -1447,10 +1459,10 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { if (poolHostVO == null) { return false; } - final ScaleIOGatewayClient client = getScaleIOClient(pool.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(pool); return 
client.isSdcConnected(poolHostVO.getLocalPath()); } catch (Exception e) { - logger.warn("Unable to check the host: " + host.getId() + " access to storage pool: " + pool.getId() + " due to " + e.getMessage(), e); + logger.warn("Unable to check the host: {} access to storage pool: {} due to {}", host, pool, e.getMessage(), e); return false; } } @@ -1470,8 +1482,8 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { return; } - logger.warn("SDC not connected on the host: " + host.getId()); - String msg = "SDC not connected on the host: " + host.getId() + ", reconnect the SDC to MDM"; + logger.warn("SDC not connected on the host: {}", host); + String msg = String.format("SDC not connected on the host: %s, reconnect the SDC to MDM", host); alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC disconnected on host: " + host.getUuid(), msg); } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java index 7bbe0331c07..38f9dc20fbd 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java @@ -262,23 +262,23 @@ public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCy primaryDataStoreInfo.getPodId(), primaryDataStoreInfo.getDataCenterId()); if (hostsInCluster.isEmpty()) { primaryDataStoreDao.expunge(primaryDataStoreInfo.getId()); - throw new CloudRuntimeException("No hosts are Up to associate a storage pool with in cluster: " + primaryDataStoreInfo.getClusterId()); + throw new CloudRuntimeException("No hosts are Up to associate a 
storage pool with in cluster: " + cluster); } - logger.debug("Attaching the pool to each of the hosts in the cluster: " + primaryDataStoreInfo.getClusterId()); + logger.debug("Attaching the pool to each of the hosts in the cluster: {}", cluster); List poolHosts = new ArrayList(); for (HostVO host : hostsInCluster) { try { - if (storageMgr.connectHostToSharedPool(host.getId(), primaryDataStoreInfo.getId())) { + if (storageMgr.connectHostToSharedPool(host, primaryDataStoreInfo.getId())) { poolHosts.add(host); } } catch (Exception e) { - logger.warn("Unable to establish a connection between host: " + host + " and pool: " + dataStore + "on the cluster: " + primaryDataStoreInfo.getClusterId(), e); + logger.warn(String.format("Unable to establish a connection between host: %s and pool: %s on the cluster: %s", host, dataStore, cluster), e); } } if (poolHosts.isEmpty()) { - logger.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '" + primaryDataStoreInfo.getClusterId() + "'."); + logger.warn("No host can access storage pool '{}' on cluster '{}'.", primaryDataStoreInfo, cluster); } dataStoreHelper.attachCluster(dataStore); @@ -301,7 +301,7 @@ public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCy List poolHosts = new ArrayList(); for (HostVO host : hosts) { try { - if (storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId())) { + if (storageMgr.connectHostToSharedPool(host, dataStore.getId())) { poolHosts.add(host); } } catch (Exception e) { @@ -360,17 +360,17 @@ public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCy DeleteStoragePoolCommand deleteStoragePoolCommand = new DeleteStoragePoolCommand(storagePool); final Answer answer = agentMgr.easySend(poolHostVO.getHostId(), deleteStoragePoolCommand); if (answer != null && answer.getResult()) { - logger.info("Successfully deleted storage pool: " + storagePool.getId() + " from host: " + poolHostVO.getHostId()); + 
logger.info("Successfully deleted storage pool: {} from host: {}", storagePool, poolHostVO.getHostId()); } else { if (answer != null) { - logger.error("Failed to delete storage pool: " + storagePool.getId() + " from host: " + poolHostVO.getHostId() + " , result: " + answer.getResult()); + logger.error("Failed to delete storage pool: {} from host: {} , result: {}", storagePool, poolHostVO.getHostId(), answer.getResult()); } else { - logger.error("Failed to delete storage pool: " + storagePool.getId() + " from host: " + poolHostVO.getHostId()); + logger.error("Failed to delete storage pool: {} from host: {}", storagePool, poolHostVO.getHostId()); } } } - ScaleIOGatewayClientConnectionPool.getInstance().removeClient(dataStore.getId()); + ScaleIOGatewayClientConnectionPool.getInstance().removeClient(dataStore); return dataStoreHelper.deletePrimaryDataStore(dataStore); } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java index 4d3a78f6875..f1177acc7b4 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java @@ -29,7 +29,9 @@ import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.commons.collections.MapUtils; 
import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; @@ -70,6 +72,8 @@ public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager, Configurable { @Inject StoragePoolHostDao storagePoolHostDao; @Inject + private PrimaryDataStoreDao storagePoolDao; + @Inject StoragePoolDetailsDao storagePoolDetailsDao; @Inject ConfigurationDao configDao; @@ -83,6 +87,7 @@ public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager, Configurable { @Override public boolean areSDCConnectionsWithinLimit(Long storagePoolId) { + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); try { int connectedClientsLimit = StorageManager.STORAGE_POOL_CONNECTED_CLIENTS_LIMIT.valueIn(storagePoolId); if (connectedClientsLimit <= 0) { @@ -91,13 +96,19 @@ public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager, Configurable { int connectedSdcsCount = getScaleIOClient(storagePoolId).getConnectedSdcsCount(); if (connectedSdcsCount < connectedClientsLimit) { - logger.debug(String.format("Current connected SDCs count: %d - SDC connections are within the limit (%d) on PowerFlex Storage with pool id: %d", connectedSdcsCount, connectedClientsLimit, storagePoolId)); + logger.debug("Current connected SDCs count: {} - SDC connections are " + + "within the limit ({}) on PowerFlex Storage with pool {}", + connectedSdcsCount, connectedClientsLimit, storagePool); return true; } - logger.debug(String.format("Current connected SDCs count: %d - SDC connections limit (%d) reached on PowerFlex Storage with pool id: %d", connectedSdcsCount, connectedClientsLimit, storagePoolId)); + logger.debug("Current connected SDCs count: {} - SDC connections limit ({}) " + + "reached on PowerFlex Storage with pool {}", + connectedSdcsCount, connectedClientsLimit, storagePool); return false; } catch (Exception e) { - String errMsg = "Unable to check SDC connections for the PowerFlex storage pool with id: " + storagePoolId + " due to " + e.getMessage(); + String errMsg = 
String.format( + "Unable to check SDC connections for the PowerFlex storage pool %s due to %s", + storagePool, e.getMessage()); logger.warn(errMsg, e); return false; } @@ -134,7 +145,8 @@ public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager, Configurable { long hostId = host.getId(); String sdcId = getConnectedSdc(host, dataStore); if (StringUtils.isNotBlank(sdcId)) { - logger.debug(String.format("SDC %s already connected for the pool: %d on host: %d, no need to prepare/start it", sdcId, poolId, hostId)); + logger.debug("SDC {} already connected for the pool: {} on host: {}, " + + "no need to prepare/start it", sdcId, dataStore, host); return sdcId; } @@ -174,7 +186,7 @@ public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager, Configurable { } int waitTimeInSecs = 15; // Wait for 15 secs (usual tests with SDC service start took 10-15 secs) - if (hostSdcConnected(sdcId, poolId, waitTimeInSecs)) { + if (hostSdcConnected(sdcId, dataStore, waitTimeInSecs)) { return sdcId; } return null; @@ -191,7 +203,7 @@ public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager, Configurable { } private String prepareSDCOnHost(Host host, DataStore dataStore, String systemId) { - logger.debug(String.format("Preparing SDC on the host %s (%s)", host.getId(), host.getName())); + logger.debug("Preparing SDC on the host {}", host); Map details = new HashMap<>(); details.put(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID, systemId); PrepareStorageClientCommand cmd = new PrepareStorageClientCommand(((PrimaryDataStore) dataStore).getPoolType(), dataStore.getUuid(), details); @@ -202,26 +214,26 @@ public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager, Configurable { try { prepareStorageClientAnswer = (PrepareStorageClientAnswer) agentManager.send(host.getId(), cmd); } catch (AgentUnavailableException | OperationTimedoutException e) { - String err = String.format("Failed to prepare SDC on the host %s, due to: %s", host.getName(), e.getMessage()); + String err = 
String.format("Failed to prepare SDC on the host %s, due to: %s", host, e.getMessage()); logger.error(err); throw new CloudRuntimeException(err); } if (prepareStorageClientAnswer == null) { - String err = String.format("Unable to prepare SDC on the host %s", host.getName()); + String err = String.format("Unable to prepare SDC on the host %s", host); logger.error(err); throw new CloudRuntimeException(err); } if (!prepareStorageClientAnswer.getResult()) { - String err = String.format("Unable to prepare SDC on the host %s, due to: %s", host.getName(), prepareStorageClientAnswer.getDetails()); + String err = String.format("Unable to prepare SDC on the host %s, due to: %s", host, prepareStorageClientAnswer.getDetails()); logger.error(err); throw new CloudRuntimeException(err); } Map poolDetails = prepareStorageClientAnswer.getDetailsMap(); if (MapUtils.isEmpty(poolDetails)) { - logger.warn(String.format("PowerFlex storage SDC details not found on the host: %s, try (re)install SDC and restart agent", host.getId())); + logger.warn("PowerFlex storage SDC details not found on the host: {}, try (re)install SDC and restart agent", host); return null; } @@ -230,11 +242,11 @@ public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager, Configurable { sdcId = poolDetails.get(ScaleIOGatewayClient.SDC_ID); } else if (poolDetails.containsKey(ScaleIOGatewayClient.SDC_GUID)) { String sdcGuid = poolDetails.get(ScaleIOGatewayClient.SDC_GUID); - sdcId = getHostSdcId(sdcGuid, dataStore.getId()); + sdcId = getHostSdcId(sdcGuid, dataStore); } if (StringUtils.isBlank(sdcId)) { - logger.warn(String.format("Couldn't retrieve PowerFlex storage SDC details from the host: %s, try (re)install SDC and restart agent", host.getId())); + logger.warn("Couldn't retrieve PowerFlex storage SDC details from the host: {}, try (re)install SDC and restart agent", host); return null; } @@ -250,7 +262,7 @@ public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager, Configurable { String systemId = 
storagePoolDetailsDao.findDetail(dataStore.getId(), ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID).getValue(); if (systemId == null) { - throw new CloudRuntimeException("Unable to unprepare SDC, failed to get the system id for PowerFlex storage pool: " + dataStore.getName()); + throw new CloudRuntimeException("Unable to unprepare SDC, failed to get the system id for PowerFlex storage pool: " + dataStore); } GlobalLock lock = null; @@ -294,27 +306,25 @@ public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager, Configurable { try { unprepareStorageClientAnswer = agentManager.send(host.getId(), cmd); } catch (AgentUnavailableException | OperationTimedoutException e) { - String err = String.format("Failed to unprepare SDC on the host %s due to: %s", host.getName(), e.getMessage()); - logger.error(err); + logger.error("Failed to unprepare SDC on the host {} due to: {}", host, e.getMessage()); return false; } if (!unprepareStorageClientAnswer.getResult()) { - String err = String.format("Unable to unprepare SDC on the the host %s due to: %s", host.getName(), unprepareStorageClientAnswer.getDetails()); - logger.error(err); + logger.error("Unable to unprepare SDC on the host {} due to: {}", host, unprepareStorageClientAnswer.getDetails()); return false; } return true; } - private String getHostSdcId(String sdcGuid, long poolId) { + private String getHostSdcId(String sdcGuid, DataStore dataStore) { try { - logger.debug(String.format("Try to get host SDC Id for pool: %s, with SDC guid %s", poolId, sdcGuid)); - ScaleIOGatewayClient client = getScaleIOClient(poolId); + logger.debug("Try to get host SDC Id for pool: {}, with SDC guid {}", dataStore, sdcGuid); + ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); return client.getSdcIdByGuid(sdcGuid); } catch (Exception e) { - logger.error(String.format("Failed to get host SDC Id for pool: %s", poolId), e); - throw new CloudRuntimeException(String.format("Failed to establish connection with PowerFlex
Gateway to get host SDC Id for pool: %s", poolId)); + logger.error(String.format("Failed to get host SDC Id for pool: %s", dataStore), e); + throw new CloudRuntimeException(String.format("Failed to establish connection with PowerFlex Gateway to get host SDC Id for pool: %s", dataStore)); } } @@ -333,14 +343,18 @@ public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager, Configurable { return poolHostVO.getLocalPath(); } } catch (Exception e) { - logger.warn("Unable to get connected SDC for the host: " + hostId + " and storage pool: " + poolId + " due to " + e.getMessage(), e); + logger.warn( + String.format("Unable to get connected SDC for the host: %s and storage pool: %s due to %s", + host, dataStore, e.getMessage()), e); } return null; } - private boolean hostSdcConnected(String sdcId, long poolId, int waitTimeInSecs) { - logger.debug(String.format("Waiting (for %d secs) for the SDC %s of the pool id: %d to connect", waitTimeInSecs, sdcId, poolId)); + private boolean hostSdcConnected(String sdcId, DataStore dataStore, int waitTimeInSecs) { + long poolId = dataStore.getId(); + logger.debug(String.format("Waiting (for %d secs) for the SDC %s of the pool %s to connect", + waitTimeInSecs, sdcId, dataStore)); int timeBetweenTries = 1000; // Try more frequently (every sec) and return early if connected while (waitTimeInSecs > 0) { if (isHostSdcConnected(sdcId, poolId)) { @@ -366,7 +380,8 @@ public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager, Configurable { } private ScaleIOGatewayClient getScaleIOClient(final Long storagePoolId) throws Exception { - return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePoolId, storagePoolDetailsDao); + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePool, storagePoolDetailsDao); } @Override diff --git 
a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java index 737cc818be8..5fc4868902e 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java @@ -32,6 +32,7 @@ import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.Logger; @@ -89,7 +90,7 @@ public class ScaleIOHostListener implements HypervisorHostListener { storagePoolHost.setLocalPath(sdcId); _storagePoolHostDao.update(storagePoolHost.getId(), storagePoolHost); } - logger.info("Connection established between storage pool: " + storagePool + " and host: " + hostId); + logger.info("Connection established between storage pool: {} and host: {}", storagePool, host); } return true; } @@ -105,10 +106,10 @@ public class ScaleIOHostListener implements HypervisorHostListener { details.put(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID, systemId); ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool, storagePool.getPath(), details); - ModifyStoragePoolAnswer answer = sendModifyStoragePoolCommand(cmd, storagePool, hostId); + ModifyStoragePoolAnswer answer = sendModifyStoragePoolCommand(cmd, storagePool, host); Map poolDetails = answer.getPoolInfo().getDetails(); if 
(MapUtils.isEmpty(poolDetails)) { - String msg = "PowerFlex storage SDC details not found on the host: " + hostId + ", (re)install SDC and restart agent"; + String msg = String.format("PowerFlex storage SDC details not found on the host: %s, (re)install SDC and restart agent", host); logger.warn(msg); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC not found on host: " + host.getUuid(), msg); return null; @@ -123,7 +124,7 @@ public class ScaleIOHostListener implements HypervisorHostListener { } if (StringUtils.isBlank(sdcId)) { - String msg = "Couldn't retrieve PowerFlex storage SDC details from the host: " + hostId + ", (re)install SDC and restart agent"; + String msg = String.format("Couldn't retrieve PowerFlex storage SDC details from the host: %s, (re)install SDC and restart agent", host); logger.warn(msg); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC details not found on host: " + host.getUuid(), msg); return null; @@ -133,33 +134,36 @@ public class ScaleIOHostListener implements HypervisorHostListener { } private String getHostSdcId(String sdcGuid, long poolId) { + StoragePoolVO storagePool = _primaryDataStoreDao.findById(poolId); try { - logger.debug(String.format("Try to get host SDC Id for pool: %s, with SDC guid %s", poolId, sdcGuid)); - ScaleIOGatewayClient client = ScaleIOGatewayClientConnectionPool.getInstance().getClient(poolId, _storagePoolDetailsDao); + logger.debug(String.format("Try to get host SDC Id for pool: %s, with SDC guid %s", storagePool, sdcGuid)); + ScaleIOGatewayClient client = ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePool, _storagePoolDetailsDao); return client.getSdcIdByGuid(sdcGuid); } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) { - logger.error(String.format("Failed to get host SDC Id for pool: %s", poolId), e); - throw new 
CloudRuntimeException(String.format("Failed to establish connection with PowerFlex Gateway to get host SDC Id for pool: %s", poolId)); + logger.error(String.format("Failed to get host SDC Id for pool: %s", storagePool), e); + throw new CloudRuntimeException(String.format( + "Failed to establish connection with PowerFlex Gateway to get host SDC Id for pool: %s", + storagePool)); } } - private ModifyStoragePoolAnswer sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StoragePool storagePool, long hostId) { - Answer answer = _agentMgr.easySend(hostId, cmd); + private ModifyStoragePoolAnswer sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StoragePool storagePool, HostVO host) { + Answer answer = _agentMgr.easySend(host.getId(), cmd); if (answer == null) { - throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command (" + storagePool.getId() + ")"); + throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command (" + storagePool.getName() + ")"); } if (!answer.getResult()) { - String msg = "Unable to attach PowerFlex storage pool " + storagePool.getId() + " to host " + hostId; + String msg = "Unable to attach PowerFlex storage pool " + storagePool + " to host " + host.getUuid(); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg); - throw new CloudRuntimeException("Unable to establish a connection from agent to PowerFlex storage pool " + storagePool.getId() + " due to " + answer.getDetails() + + throw new CloudRuntimeException("Unable to establish a connection from agent to PowerFlex storage pool " + storagePool + " due to " + answer.getDetails() + " (" + storagePool.getId() + ")"); } - assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer expected ; PowerFlex Storage Pool = " + storagePool.getId() + " Host = " + hostId; + assert (answer instanceof ModifyStoragePoolAnswer) : 
"ModifyStoragePoolAnswer expected ; PowerFlex Storage Pool = " + storagePool.getId() + " Host = " + host; return (ModifyStoragePoolAnswer) answer; } diff --git a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriverTest.java b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriverTest.java index 4979fd1fa0a..921dd3d4d9f 100644 --- a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriverTest.java +++ b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriverTest.java @@ -386,14 +386,13 @@ public class ScaleIOPrimaryDataStoreDriverTest { when(srcData.getPath()).thenReturn("bec0ba7700000007:vol-11-6aef-10ee"); when(srcData.getFolder()).thenReturn("921c364500000007"); DataStore destStore = Mockito.mock(DataStore.class); - when(destStore.getId()).thenReturn(2L); when(destData.getDataStore()).thenReturn(destStore); doNothing().when(scaleIOPrimaryDataStoreDriver) .revokeAccess(any(), any(), any()); ScaleIOGatewayClient client = Mockito.mock(ScaleIOGatewayClient.class); doReturn(client).when(scaleIOPrimaryDataStoreDriver) - .getScaleIOClient(any()); + .getScaleIOClient(any(DataStore.class)); when(client.deleteVolume(any())).thenReturn(true); VolumeVO volume = new VolumeVO("root", 1L, 1L, 1L, 1L, 1L, "root", "root", Storage.ProvisioningType.THIN, 1, null, null, "root", Volume.Type.ROOT); @@ -424,13 +423,12 @@ public class ScaleIOPrimaryDataStoreDriverTest { when(srcData.getPath()).thenReturn(srcVolumePath); when(srcData.getFolder()).thenReturn("921c364500000007"); DataStore destStore = Mockito.mock(DataStore.class); - when(destStore.getId()).thenReturn(2L); when(destData.getDataStore()).thenReturn(destStore); doNothing().when(scaleIOPrimaryDataStoreDriver).revokeAccess(any(), any(), any()); ScaleIOGatewayClient 
client = Mockito.mock(ScaleIOGatewayClient.class); doReturn(client).when(scaleIOPrimaryDataStoreDriver) - .getScaleIOClient(any()); + .getScaleIOClient(any(DataStore.class)); when(client.deleteVolume(any())).thenReturn(false); VolumeVO volume = new VolumeVO("root", 1L, 1L, 1L, 1L, 1L, "root", "root", Storage.ProvisioningType.THIN, 1, null, null, "root", Volume.Type.ROOT); @@ -461,7 +459,7 @@ public class ScaleIOPrimaryDataStoreDriverTest { ScaleIOGatewayClient client = Mockito.mock(ScaleIOGatewayClient.class); doReturn(client).when(scaleIOPrimaryDataStoreDriver) - .getScaleIOClient(any()); + .getScaleIOClient(any(DataStore.class)); when(client.deleteVolume(any())).thenReturn(true); scaleIOPrimaryDataStoreDriver.deleteSourceVolumeAfterSuccessfulBlockCopy(srcData, host); @@ -473,21 +471,19 @@ public class ScaleIOPrimaryDataStoreDriverTest { VolumeInfo srcData = Mockito.mock(VolumeInfo.class); Host host = Mockito.mock(Host.class); - when(host.getId()).thenReturn(1L); String srcVolumePath = "bec0ba7700000007:vol-11-6aef-10ee"; DataStore srcStore = Mockito.mock(DataStore.class); - when(srcStore.getId()).thenReturn(1L); DataTO volumeTO = Mockito.mock(DataTO.class); when(srcData.getDataStore()).thenReturn(srcStore); when(srcData.getTO()).thenReturn(volumeTO); when(volumeTO.getPath()).thenReturn(srcVolumePath); String sdcId = "7332760565f6340f"; - doReturn(sdcId).when(scaleIOPrimaryDataStoreDriver).getConnectedSdc(1L, 1L); + doReturn(sdcId).when(scaleIOPrimaryDataStoreDriver).getConnectedSdc(srcStore, host); ScaleIOGatewayClient client = Mockito.mock(ScaleIOGatewayClient.class); doReturn(client).when(scaleIOPrimaryDataStoreDriver) - .getScaleIOClient(any()); + .getScaleIOClient(any(DataStore.class)); doReturn(true).when(client).unmapVolumeFromSdc(any(), any()); when(client.deleteVolume(any())).thenReturn(false); @@ -504,13 +500,12 @@ public class ScaleIOPrimaryDataStoreDriverTest { String srcVolumePath = "bec0ba7700000007:vol-11-6aef-10ee"; DataStore srcStore = 
Mockito.mock(DataStore.class); - when(srcStore.getId()).thenReturn(1L); DataTO volumeTO = Mockito.mock(DataTO.class); when(srcData.getDataStore()).thenReturn(srcStore); when(srcData.getTO()).thenReturn(volumeTO); when(volumeTO.getPath()).thenReturn(srcVolumePath); String sdcId = "7332760565f6340f"; - doReturn(null).when(scaleIOPrimaryDataStoreDriver).getConnectedSdc(1L, 1L); + doReturn(null).when(scaleIOPrimaryDataStoreDriver).getConnectedSdc(srcStore, host); scaleIOPrimaryDataStoreDriver.deleteSourceVolumeAfterSuccessfulBlockCopy(srcData, host); } diff --git a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java index 52dcad51942..dbeba0e4bde 100644 --- a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java +++ b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java @@ -131,7 +131,7 @@ public class ScaleIOPrimaryDataStoreLifeCycleTest { ScaleIOGatewayClientImpl client = mock(ScaleIOGatewayClientImpl.class); ScaleIOGatewayClientConnectionPool pool = mock(ScaleIOGatewayClientConnectionPool.class); scaleIOGatewayClientConnectionPoolMocked.when(() -> ScaleIOGatewayClientConnectionPool.getInstance()).thenReturn(pool); - lenient().when(pool.getClient(1L, storagePoolDetailsDao)).thenReturn(client); + lenient().when(pool.getClient(dataStore, storagePoolDetailsDao)).thenReturn(client); lenient().when(client.haveConnectedSdcs()).thenReturn(true); @@ -150,14 +150,11 @@ public class ScaleIOPrimaryDataStoreLifeCycleTest { when(resourceManager.listAllUpAndEnabledHostsInOneZoneByHypervisor(Hypervisor.HypervisorType.KVM, 1L)).thenReturn(hostList); when(dataStoreMgr.getDataStore(anyLong(), 
eq(DataStoreRole.Primary))).thenReturn(store); - when(store.getId()).thenReturn(1L); when(store.isShared()).thenReturn(true); - when(store.getName()).thenReturn("ScaleIOPool"); when(store.getStorageProviderName()).thenReturn(ScaleIOUtil.PROVIDER_NAME); when(dataStoreProviderMgr.getDataStoreProvider(ScaleIOUtil.PROVIDER_NAME)).thenReturn(dataStoreProvider); when(dataStoreProvider.getName()).thenReturn(ScaleIOUtil.PROVIDER_NAME); - when(hostListener.hostConnect(Mockito.anyLong(), Mockito.anyLong())).thenReturn(true); storageMgr.registerHostListener(ScaleIOUtil.PROVIDER_NAME, hostListener); when(dataStoreHelper.attachZone(Mockito.any(DataStore.class))).thenReturn(null); diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java index 04f9045f570..ba23566c3fd 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java @@ -215,7 +215,8 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { } if (isRevokeAccessNotNeeded(dataObject)) { - logger.debug("Skipping revoke access for Solidfire data object type:" + dataObject.getType() + " id:" + dataObject.getId()); + logger.debug("Skipping revoke access for Solidfire data object type: {} id: {} uuid: {}", + dataObject.getType(), dataObject.getId(), dataObject.getUuid()); return; } @@ -235,7 +236,8 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { throw new CloudRuntimeException(errMsg); } - logger.debug("Revoking access for Solidfire data object type:" + dataObject.getType() + " id:" + dataObject.getId()); + logger.debug("Revoking 
access for Solidfire data object type: {} id: {} uuid: {}", + dataObject.getType(), dataObject.getId(), dataObject.getUuid()); try { SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao); @@ -951,7 +953,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { result.setResult(null); } catch (Exception ex) { - logger.debug(SolidFireUtil.LOGGER_PREFIX + "Failed to take CloudStack snapshot: " + snapshotInfo.getId(), ex); + logger.debug("{}Failed to take CloudStack snapshot: {}", SolidFireUtil.LOGGER_PREFIX, snapshotInfo.getSnapshotVO(), ex); result = new CreateCmdResult(null, new CreateObjectAnswer(ex.toString())); @@ -1269,7 +1271,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { } } catch (Exception ex) { - logger.debug(SolidFireUtil.LOGGER_PREFIX + "Failed to delete SolidFire volume. CloudStack volume ID: " + volumeInfo.getId(), ex); + logger.debug("{}Failed to delete SolidFire volume. CloudStack volume {}", SolidFireUtil.LOGGER_PREFIX, volumeInfo.getVolume(), ex); throw ex; } @@ -1312,7 +1314,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { storagePoolDao.update(storagePoolId, storagePool); } catch (Exception ex) { - logger.debug(SolidFireUtil.LOGGER_PREFIX + "Issue in 'deleteSnapshot(SnapshotInfo, long)'. CloudStack snapshot ID: " + csSnapshotId, ex); + logger.debug("{}Issue in 'deleteSnapshot(SnapshotInfo, long)'. CloudStack snapshot: {}", SolidFireUtil.LOGGER_PREFIX, snapshotInfo.getSnapshotVO(), ex); throw ex; } @@ -1336,7 +1338,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { storagePoolDao.update(storagePoolId, storagePool); } catch (Exception ex) { - logger.debug(SolidFireUtil.LOGGER_PREFIX + "Failed to delete SolidFire template volume. CloudStack template ID: " + template.getId(), ex); + logger.debug("{}Failed to delete SolidFire template volume. 
CloudStack template: {}", SolidFireUtil.LOGGER_PREFIX, template, ex); throw ex; } @@ -1508,13 +1510,13 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { long newSizeWithHsr = (long)(newSize + newSize * (newHypervisorSnapshotReserve / 100f)); if (newSizeWithHsr < currentSizeWithHsr) { - throw new CloudRuntimeException("Storage pool " + storagePoolId + " does not support shrinking a volume."); + throw new CloudRuntimeException(String.format("Storage pool %s does not support shrinking a volume.", storagePool)); } long availableBytes = storagePool.getCapacityBytes() - getUsedBytes(storagePool); if ((newSizeWithHsr - currentSizeWithHsr) > availableBytes) { - throw new CloudRuntimeException("Storage pool " + storagePoolId + " does not have enough space to expand the volume."); + throw new CloudRuntimeException(String.format("Storage pool %s does not have enough space to expand the volume.", storagePool)); } } diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java index 0b5393bd8d8..1dbbf458b48 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java @@ -241,7 +241,7 @@ public class SolidFirePrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLife for (HostVO host : hosts) { try { - _storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); } @@ -265,7 +265,7 @@ public class 
SolidFirePrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLife for (HostVO host : hosts) { try { - _storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); } diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java index 4877e86bf9f..482fa23096a 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java @@ -389,7 +389,7 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle extends BasePrimaryDataSto if (allHosts.isEmpty()) { primaryDataStoreDao.expunge(primaryDataStoreInfo.getId()); - throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primaryDataStoreInfo.getClusterId()); + throw new CloudRuntimeException(String.format("No host up to associate a storage pool with in cluster %s", clusterDao.findById(primaryDataStoreInfo.getClusterId()))); } boolean success = false; @@ -403,14 +403,14 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle extends BasePrimaryDataSto } if (!success) { - throw new CloudRuntimeException("Unable to create storage in cluster " + primaryDataStoreInfo.getClusterId()); + throw new CloudRuntimeException("Unable to create storage in cluster " + clusterDao.findById(primaryDataStoreInfo.getClusterId())); } List poolHosts = new ArrayList<>(); for (HostVO host : allHosts) { try { - storageMgr.connectHostToSharedPool(host.getId(), 
primaryDataStoreInfo.getId()); + storageMgr.connectHostToSharedPool(host, primaryDataStoreInfo.getId()); poolHosts.add(host); } catch (Exception e) { @@ -419,7 +419,7 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle extends BasePrimaryDataSto } if (poolHosts.isEmpty()) { - logger.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '" + primaryDataStoreInfo.getClusterId() + "'."); + logger.warn("No host can access storage pool '{}' on cluster '{}'.", primaryDataStoreInfo, clusterDao.findById(primaryDataStoreInfo.getClusterId())); primaryDataStoreDao.expunge(primaryDataStoreInfo.getId()); @@ -470,9 +470,9 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle extends BasePrimaryDataSto final String msg; if (answer != null) { - msg = "Cannot create storage pool through host '" + hostId + "' due to the following: " + answer.getDetails(); + msg = String.format("Cannot create storage pool through host '%s' due to the following: %s", host, answer.getDetails()); } else { - msg = "Cannot create storage pool through host '" + hostId + "' due to CreateStoragePoolCommand returns null"; + msg = String.format("Cannot create storage pool through host '%s' due to CreateStoragePoolCommand returns null", host); } logger.warn(msg); @@ -558,23 +558,21 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle extends BasePrimaryDataSto final Answer answer = agentMgr.easySend(host.getHostId(), deleteCmd); if (answer != null && answer.getResult()) { - logger.info("Successfully deleted storage pool using Host ID " + host.getHostId()); - HostVO hostVO = hostDao.findById(host.getHostId()); - if (hostVO != null) { clusterId = hostVO.getClusterId(); hostId = hostVO.getId(); } - + logger.info("Successfully deleted storage pool using Host {} with ID {}", hostVO, host.getHostId()); break; } else { + HostVO hostVO = hostDao.findById(host.getHostId()); if (answer != null) { - logger.error("Failed to delete storage pool using Host ID " + host.getHostId() + ": " 
+ answer.getResult()); + logger.error("Failed to delete storage pool using Host {} with ID: {}: {}", hostVO, host.getHostId(), answer.getResult()); } else { - logger.error("Failed to delete storage pool using Host ID " + host.getHostId()); + logger.error("Failed to delete storage pool using Host {} with ID: {}", hostVO, host.getHostId()); } } } @@ -646,12 +644,12 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle extends BasePrimaryDataSto cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC); cmd.setRemoveAsync(true); - sendModifyTargetsCommand(cmd, hostId); + sendModifyTargetsCommand(cmd, host); } } - private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { - Answer answer = agentMgr.easySend(hostId, cmd); + private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, Host host) { + Answer answer = agentMgr.easySend(host.getId(), cmd); if (answer == null) { String msg = "Unable to get an answer to the modify targets command"; @@ -659,7 +657,7 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle extends BasePrimaryDataSto logger.warn(msg); } else if (!answer.getResult()) { - String msg = "Unable to modify target on the following host: " + hostId; + String msg = String.format("Unable to modify target on the following host: %s", host); logger.warn(msg); } diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java index d8473428393..052191128f1 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java @@ -25,6 +25,7 @@ import java.util.Map; import javax.inject.Inject; +import com.cloud.host.Host; import 
org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -105,10 +106,10 @@ public class SolidFireHostListener implements HypervisorHostListener { } if (host.getHypervisorType().equals(HypervisorType.XenServer)) { - handleXenServer(host.getClusterId(), host.getId(), storagePoolId); + handleXenServer(host.getClusterId(), host, storagePoolId); } else if (host.getHypervisorType().equals(HypervisorType.KVM)) { - handleKVM(hostId, storagePoolId); + handleKVM(host, storagePoolId); } return true; @@ -147,7 +148,7 @@ public class SolidFireHostListener implements HypervisorHostListener { return true; } - private void handleXenServer(long clusterId, long hostId, long storagePoolId) { + private void handleXenServer(long clusterId, Host host, long storagePoolId) { List storagePaths = getStoragePaths(clusterId, storagePoolId); StoragePool storagePool = (StoragePool)dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); @@ -157,7 +158,7 @@ public class SolidFireHostListener implements HypervisorHostListener { cmd.setStoragePath(storagePath); - sendModifyStoragePoolCommand(cmd, storagePool, hostId); + sendModifyStoragePoolCommand(cmd, storagePool, host); } } @@ -181,17 +182,17 @@ public class SolidFireHostListener implements HypervisorHostListener { cmd.setTargetTypeToRemove(targetTypeToRemove); cmd.setRemoveAsync(true); - sendModifyTargetsCommand(cmd, host.getId()); + sendModifyTargetsCommand(cmd, host); } } } - private void handleKVM(long hostId, long storagePoolId) { + private void handleKVM(Host host, long storagePoolId) { StoragePool storagePool = (StoragePool)dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool); - sendModifyStoragePoolCommand(cmd, storagePool, hostId); + sendModifyStoragePoolCommand(cmd, storagePool, host); } private List getStoragePaths(long clusterId, long storagePoolId) { @@ -260,17 +261,15 @@ public class SolidFireHostListener 
implements HypervisorHostListener { return targets; } - private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { - Answer answer = agentMgr.easySend(hostId, cmd); + private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, Host host) { + Answer answer = agentMgr.easySend(host.getId(), cmd); if (answer == null) { throw new CloudRuntimeException("Unable to get an answer to the modify targets command"); } if (!answer.getResult()) { - String msg = "Unable to modify targets on the following host: " + hostId; - - HostVO host = hostDao.findById(hostId); + String msg = String.format("Unable to modify targets on the following host: %s", host); alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), msg, msg); @@ -278,24 +277,23 @@ public class SolidFireHostListener implements HypervisorHostListener { } } - private void sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StoragePool storagePool, long hostId) { - Answer answer = agentMgr.easySend(hostId, cmd); + private void sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StoragePool storagePool, Host host) { + Answer answer = agentMgr.easySend(host.getId(), cmd); if (answer == null) { - throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command (" + storagePool.getId() + ")"); + throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command (%s)", storagePool)); } if (!answer.getResult()) { - String msg = "Unable to attach storage pool " + storagePool.getId() + " to host " + hostId; + String msg = String.format("Unable to attach storage pool %s to host %s", storagePool, host); alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg); - throw new CloudRuntimeException("Unable to establish a connection from agent to storage pool " + storagePool.getId() + " due to " + answer.getDetails() + - " (" + 
storagePool.getId() + ")"); + throw new CloudRuntimeException(String.format("Unable to establish a connection from agent to storage pool %s due to %s", storagePool, answer.getDetails())); } - assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer expected ; Pool = " + storagePool.getId() + " Host = " + hostId; + assert (answer instanceof ModifyStoragePoolAnswer) : String.format("ModifyStoragePoolAnswer expected ; Pool = %s Host = %s", storagePool, host); - logger.info("Connection established between storage pool " + storagePool + " and host + " + hostId); + logger.info("Connection established between storage pool {} and host {}", storagePool, host); } } diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java index 98c8bfb51c1..f746390085b 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java @@ -25,6 +25,7 @@ import java.util.Map; import javax.inject.Inject; +import com.cloud.host.Host; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -187,23 +188,21 @@ public class SolidFireSharedHostListener implements HypervisorHostListener { cmd.setTargetTypeToRemove(targetTypeToRemove); cmd.setRemoveAsync(true); - sendModifyTargetsCommand(cmd, host.getId()); + sendModifyTargetsCommand(cmd, host); } } } } - private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { - Answer answer = agentMgr.easySend(hostId, cmd); + private 
void sendModifyTargetsCommand(ModifyTargetsCommand cmd, Host host) { + Answer answer = agentMgr.easySend(host.getId(), cmd); if (answer == null) { throw new CloudRuntimeException("Unable to get an answer to the modify targets command"); } if (!answer.getResult()) { - String msg = "Unable to modify targets on the following host: " + hostId; - - HostVO host = hostDao.findById(hostId); + String msg = String.format("Unable to modify targets on the following host: %s", host); alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), msg, msg); @@ -215,21 +214,22 @@ public class SolidFireSharedHostListener implements HypervisorHostListener { Answer answer = agentMgr.easySend(hostId, cmd); if (answer == null) { - throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command for storage pool: " + storagePool.getId()); + throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command for storage pool: %s", storagePool)); } + HostVO host = hostDao.findById(hostId); if (!answer.getResult()) { - String msg = "Unable to attach storage pool " + storagePool.getId() + " to the host " + hostId; + String msg = String.format("Unable to attach storage pool %s to the host %s", storagePool, host); alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg); throw new CloudRuntimeException(msg); } - assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer not returned from ModifyStoragePoolCommand; Storage pool = " + - storagePool.getId() + "; Host = " + hostId; + assert (answer instanceof ModifyStoragePoolAnswer) : + String.format("ModifyStoragePoolAnswer not returned from ModifyStoragePoolCommand; Storage pool = %s; Host = %s", storagePool, host); - logger.info("Connection established between storage pool " + storagePool + " and host " + hostId); + logger.info("Connection established 
between storage pool {} and host {}", storagePool, host); return (ModifyStoragePoolAnswer)answer; } diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java index 671431f4163..68f0ff4bf37 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java @@ -540,7 +540,7 @@ public class SolidFireUtil { if (sfVag.getInitiators().length < MAX_NUM_INITIATORS_PER_VAG) { if (!hostSupports_iScsi(host)) { - String errMsg = "Host with ID " + host.getId() + " does not support iSCSI."; + String errMsg = String.format("Host %s does not support iSCSI.", host); LOGGER.warn(errMsg); @@ -562,7 +562,7 @@ public class SolidFireUtil { if (numVags > 0) { if (!hostSupports_iScsi(host)) { - String errMsg = "Host with ID " + host.getId() + " does not support iSCSI."; + String errMsg = String.format("Host %s does not support iSCSI.", host); LOGGER.warn(errMsg); diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupSnapshotCommandWrapper.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupSnapshotCommandWrapper.java index b3f49b015d1..00746334e02 100644 --- a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupSnapshotCommandWrapper.java +++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupSnapshotCommandWrapper.java @@ -92,7 +92,8 @@ public final class StorPoolBackupSnapshotCommandWrapper extends CommandWrapper kvmHosts = resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId()); 
for (HostVO host : kvmHosts) { try { - storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { logger.warn(String.format("Unable to establish a connection between host %s and pool %s due to %s", host, dataStore, e)); } diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java index b696990c533..7e0986bc63b 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java @@ -114,7 +114,7 @@ public class StorPoolHostListener implements HypervisorHostListener { if (!isCurrentVersionSupportsEverythingFromPrevious) { String msg = "The current StorPool driver does not support all functionality from the one before upgrade to CS"; StorPoolUtil.spLog("Storage pool [%s] is not connected to host [%s] because the functionality after the upgrade is not full", - poolId, hostId); + pool, host); alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, pool.getDataCenterId(), pool.getPodId(), msg, msg); return false; } @@ -126,24 +126,23 @@ public class StorPoolHostListener implements HypervisorHostListener { boolean isPoolConnectedToTheHost = poolHost != null; if (answer == null) { - StorPoolUtil.spLog("Storage pool [%s] is not connected to the host [%s]", poolVO.getName(), host.getName()); + StorPoolUtil.spLog("Storage pool [%s] is not connected to the host [%s]", poolVO, host); deleteVolumeWhenHostCannotConnectPool(conn, volumeOnPool); removePoolOnHost(poolHost, isPoolConnectedToTheHost); - throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command" + 
pool.getId()); + throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command for pool %s", pool)); } if (!answer.getResult()) { - StorPoolUtil.spLog("Storage pool [%s] is not connected to the host [%s]", poolVO.getName(), host.getName()); + StorPoolUtil.spLog("Storage pool [%s] is not connected to the host [%s]", poolVO, host); removePoolOnHost(poolHost, isPoolConnectedToTheHost); if (answer.getDetails() != null && isStorPoolVolumeOrStorageNotExistsOnHost(answer)) { deleteVolumeWhenHostCannotConnectPool(conn, volumeOnPool); return false; } - String msg = "Unable to attach storage pool" + poolId + " to the host" + hostId; + String msg = String.format("Unable to attach storage pool %s to the host %s", pool, host); alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, pool.getDataCenterId(), pool.getPodId(), msg, msg); - throw new CloudRuntimeException("Unable establish connection from storage head to storage pool " + pool.getId() + " due to " + answer.getDetails() + - pool.getId()); + throw new CloudRuntimeException(String.format("Unable establish connection from storage head to storage pool %s due to %s", pool, answer.getDetails())); } StorPoolModifyStoragePoolAnswer mspAnswer = (StorPoolModifyStoragePoolAnswer)answer; @@ -152,9 +151,8 @@ public class StorPoolHostListener implements HypervisorHostListener { List localStoragePools = primaryStoreDao.listLocalStoragePoolByPath(pool.getDataCenterId(), datastoreName); for (StoragePoolVO localStoragePool : localStoragePools) { if (datastoreName.equals(localStoragePool.getPath())) { - logger.warn("Storage pool: " + pool.getId() + " has already been added as local storage: " + localStoragePool.getName()); - throw new StorageConflictException("Cannot add shared storage pool: " + pool.getId() + " because it has already been added as local storage:" - + localStoragePool.getName()); + logger.warn("Storage pool: {} has already been added as local storage: {}", pool, 
localStoragePool); + throw new StorageConflictException(String.format("Cannot add shared storage pool: %s because it has already been added as local storage: %s", pool, localStoragePool)); } } } @@ -173,7 +171,7 @@ public class StorPoolHostListener implements HypervisorHostListener { StorPoolHelper.setSpClusterIdIfNeeded(hostId, mspAnswer.getClusterId(), clusterDao, hostDao, clusterDetailsDao); - StorPoolUtil.spLog("Connection established between storage pool [%s] and host [%s]", poolVO.getName(), host.getName()); + StorPoolUtil.spLog("Connection established between storage pool [%s] and host [%s]", poolVO, host); return true; } diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java index 41e9676bb11..e7ea0900112 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java @@ -193,7 +193,7 @@ public class StorPoolDataMotionStrategy implements DataMotionStrategy { CopyCmdAnswer answer = null; String err = null; if (res.getError() != null) { - logger.debug(String.format("Could not create volume from snapshot with ID=%s", snapshot.getId())); + logger.debug("Could not create volume from snapshot [ID: {}, name: {}]", snapshot.getId(), snapshot.getName()); StorPoolUtil.spLog("Volume create failed with error=%s", res.getError().getDescr()); err = res.getError().getDescr(); } else { @@ -221,7 +221,7 @@ public class StorPoolDataMotionStrategy implements DataMotionStrategy { if (answer != null && answer.getResult()) { SpApiResponse resSnapshot = StorPoolUtil.volumeFreeze(volumeName, conn); if (resSnapshot.getError() != null) { - logger.debug(String.format("Could not snapshot volume with ID=%s", 
snapshot.getId())); + logger.debug("Could not snapshot volume [id: {}, name: {}]", snapshot.getId(), snapshot.getName()); StorPoolUtil.spLog("Volume freeze failed with error=%s", resSnapshot.getError().getDescr()); err = resSnapshot.getError().getDescr(); StorPoolUtil.volumeDelete(volumeName, conn); @@ -297,7 +297,7 @@ public class StorPoolDataMotionStrategy implements DataMotionStrategy { for (Map.Entry entry : volumeDataStoreMap.entrySet()) { VolumeInfo srcVolumeInfo = entry.getKey(); if (srcVolumeInfo.getPassphraseId() != null) { - throw new CloudRuntimeException(String.format("Cannot live migrate encrypted volume [%s] to StorPool", srcVolumeInfo.getName())); + throw new CloudRuntimeException(String.format("Cannot live migrate encrypted volume [%s] to StorPool", srcVolumeInfo.getVolume())); } DataStore destDataStore = entry.getValue(); @@ -388,7 +388,7 @@ public class StorPoolDataMotionStrategy implements DataMotionStrategy { errMsg = String.format( "Copy volume(s) of VM [%s] to storage(s) [%s] and VM to host [%s] failed in StorPoolDataMotionStrategy.copyAsync. 
Error message: [%s].", - vmTO.getId(), srcHost.getId(), destHost.getId(), ex.getMessage()); + vmTO, srcHost, destHost, ex.getMessage()); logger.error(errMsg, ex); throw new CloudRuntimeException(errMsg); @@ -524,13 +524,13 @@ public class StorPoolDataMotionStrategy implements DataMotionStrategy { private String connectHostToVolume(Host host, long storagePoolId, String iqn) { ModifyTargetsCommand modifyTargetsCommand = getModifyTargetsCommand(storagePoolId, iqn, true); - return sendModifyTargetsCommand(modifyTargetsCommand, host.getId()).get(0); + return sendModifyTargetsCommand(modifyTargetsCommand, host).get(0); } private void disconnectHostFromVolume(Host host, long storagePoolId, String iqn) { ModifyTargetsCommand modifyTargetsCommand = getModifyTargetsCommand(storagePoolId, iqn, false); - sendModifyTargetsCommand(modifyTargetsCommand, host.getId()); + sendModifyTargetsCommand(modifyTargetsCommand, host); } private ModifyTargetsCommand getModifyTargetsCommand(long storagePoolId, String iqn, boolean add) { @@ -558,15 +558,15 @@ public class StorPoolDataMotionStrategy implements DataMotionStrategy { return cmd; } - private List sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { - ModifyTargetsAnswer modifyTargetsAnswer = (ModifyTargetsAnswer) _agentManager.easySend(hostId, cmd); + private List sendModifyTargetsCommand(ModifyTargetsCommand cmd, Host host) { + ModifyTargetsAnswer modifyTargetsAnswer = (ModifyTargetsAnswer) _agentManager.easySend(host.getId(), cmd); if (modifyTargetsAnswer == null) { throw new CloudRuntimeException("Unable to get an answer to the modify targets command"); } if (!modifyTargetsAnswer.getResult()) { - String msg = "Unable to modify targets on the following host: " + hostId; + String msg = String.format("Unable to modify targets on the following host: %s", host); throw new CloudRuntimeException(msg); } diff --git 
a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java index c7bcc8a46b7..5ec86df91e1 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java @@ -121,7 +121,7 @@ public class StorPoolSnapshotStrategy implements SnapshotStrategy { } else { res = deleteSnapshotFromDbIfNeeded(snapshotVO, zoneId); markSnapshotAsDestroyedIfAlreadyRemoved(snapshotId,true); - StorPoolUtil.spLog("StorpoolSnapshotStrategy.deleteSnapshot: executed successfully=%s, snapshot uuid=%s, name=%s", res, snapshotVO.getUuid(), name); + StorPoolUtil.spLog("StorpoolSnapshotStrategy.deleteSnapshot: executed successfully=%s, snapshot %s, name=%s", res, snapshotVO, name); } } catch (Exception e) { String errMsg = String.format("Cannot delete snapshot due to %s", e.getMessage()); @@ -152,7 +152,7 @@ public class StorPoolSnapshotStrategy implements SnapshotStrategy { @Override public StrategyPriority canHandle(Snapshot snapshot, Long zoneId, SnapshotOperation op) { - logger.debug(String.format("StorpoolSnapshotStrategy.canHandle: snapshot=%s, uuid=%s, op=%s", snapshot.getName(), snapshot.getUuid(), op)); + logger.debug("StorpoolSnapshotStrategy.canHandle: snapshot {}, op={}", snapshot, op); if (op != SnapshotOperation.DELETE) { return StrategyPriority.CANT_HANDLE; @@ -181,7 +181,7 @@ public class StorPoolSnapshotStrategy implements SnapshotStrategy { } private boolean deleteSnapshotChain(SnapshotInfo snapshot) { - logger.debug("delete snapshot chain for snapshot: " + snapshot.getId()); + logger.debug("delete snapshot chain for snapshot: {}", snapshot); final SnapshotInfo snapOnImage = snapshot; boolean result = false; boolean resultIsSet = 
false; @@ -194,7 +194,7 @@ public class StorPoolSnapshotStrategy implements SnapshotStrategy { logger.debug("the snapshot has child, can't delete it on the storage"); break; } - logger.debug("Snapshot: " + snapshot.getId() + " doesn't have children, so it's ok to delete it and its parents"); + logger.debug("Snapshot: {} doesn't have children, so it's ok to delete it and its parents", snapshot); SnapshotInfo parent = snapshot.getParent(); boolean deleted = false; if (parent != null) { @@ -216,7 +216,7 @@ public class StorPoolSnapshotStrategy implements SnapshotStrategy { if (r) { List cacheSnaps = snapshotDataFactory.listSnapshotOnCache(snapshot.getId()); for (SnapshotInfo cacheSnap : cacheSnaps) { - logger.debug("Delete snapshot " + snapshot.getId() + " from image cache store: " + cacheSnap.getDataStore().getName()); + logger.debug("Delete snapshot {} from image cache store: {}", snapshot, cacheSnap.getDataStore()); cacheSnap.delete(); } } @@ -335,7 +335,7 @@ public class StorPoolSnapshotStrategy implements SnapshotStrategy { if (!Snapshot.State.BackedUp.equals(snapshotVO.getState()) && !Snapshot.State.Error.equals(snapshotVO.getState()) && !Snapshot.State.Destroying.equals(snapshotVO.getState())) { - throw new InvalidParameterValueException("Can't delete snapshot " + snapshotId + " due to it is in " + snapshotVO.getState() + " Status"); + throw new InvalidParameterValueException(String.format("Can't delete snapshot %s due to it is in %s Status", snapshotVO, snapshotVO.getState())); } List storeRefs = _snapshotStoreDao.listReadyBySnapshot(snapshotId, DataStoreRole.Image); if (zoneId != null) { diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolVMSnapshotStrategy.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolVMSnapshotStrategy.java index 2596b6a5bde..e5b24a3f98c 100644 --- 
a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolVMSnapshotStrategy.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolVMSnapshotStrategy.java @@ -148,7 +148,7 @@ public class StorPoolVMSnapshotStrategy extends DefaultVMSnapshotStrategy { vmSnapshot.getId(), StorPoolUtil.SP_STORAGE_POOL_ID, String.valueOf(poolId), false); vmSnapshotDetailsDao.persist(vmSnapshotDetailStoragePoolId); } - StorPoolUtil.spLog("Snapshot=%s of volume=%s for a group snapshot=%s.", snapshot, vol.getUuid(), vmSnapshot.getUuid()); + StorPoolUtil.spLog("Snapshot=%s of volume=%s for a group snapshot=%s.", snapshot, vol, vmSnapshot); } } } @@ -237,8 +237,8 @@ public class StorPoolVMSnapshotStrategy extends DefaultVMSnapshotStrategy { VMSnapshotDetailsVO snapshotDetailsVO = vmSnapshotDetailsDao.findDetail(vmSnapshot.getId(), volumeObjectTO.getUuid()); String snapshotName = StorPoolStorageAdaptor.getVolumeNameFromPath(snapshotDetailsVO.getValue(), true); if (snapshotName == null) { - err = String.format("Could not find StorPool's snapshot vm snapshot uuid=%s and volume uui=%s", - vmSnapshot.getUuid(), volumeObjectTO.getUuid()); + err = String.format("Could not find StorPool's snapshot vm snapshot %s and volume [id: %s, uuid: %s, name: %s]", + vmSnapshot, volumeObjectTO.getId(), volumeObjectTO.getUuid(), volumeObjectTO.getName()); logger.error("Could not delete snapshot for vm:" + err); } StorPoolUtil.spLog("StorpoolVMSnapshotStrategy.deleteVMSnapshot snapshotName=%s", snapshotName); @@ -254,10 +254,9 @@ public class StorPoolVMSnapshotStrategy extends DefaultVMSnapshotStrategy { } if (err != null) { StorPoolUtil.spLog( - "StorpoolVMSnapshotStrategy.deleteVMSnapshot delete snapshot=%s of gropusnapshot=%s failed due to %s", - snapshotName, userVm.getInstanceName(), err); - throw new CloudRuntimeException("Delete vm snapshot " + vmSnapshot.getName() + " of vm " - + userVm.getInstanceName() + " failed due 
to " + err); + "StorpoolVMSnapshotStrategy.deleteVMSnapshot delete snapshot=%s of group snapshot=%s failed due to %s", + snapshotName, userVm, err); + throw new CloudRuntimeException(String.format("Delete vm snapshot %s of vm %s failed due to %s", vmSnapshot, userVm, err)); } } vmSnapshotDetailsDao.removeDetails(vmSnapshot.getId()); @@ -344,7 +343,7 @@ public class StorPoolVMSnapshotStrategy extends DefaultVMSnapshotStrategy { finalizeRevert(vmSnapshotVO, volumeTOs); result = vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationSucceeded); } catch (CloudRuntimeException | NoTransitionException e) { - String errMsg = String.format("Error while finalize create vm snapshot [%s] due to %s", vmSnapshot.getName(), e.getMessage()); + String errMsg = String.format("Error while finalize create vm snapshot [%s] due to %s", vmSnapshot, e.getMessage()); logger.error(errMsg, e); throw new CloudRuntimeException(errMsg); } finally { diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapImportUsersCmd.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapImportUsersCmd.java index 087bd63c296..eada5f6df39 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapImportUsersCmd.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapImportUsersCmd.java @@ -117,7 +117,7 @@ public class LdapImportUsersCmd extends BaseListCmd { _accountService.createUser(user.getUsername(), generatePassword(), user.getFirstname(), user.getLastname(), user.getEmail(), timezone, accountName, domain.getId(), UUID.randomUUID().toString(), User.Source.LDAP); } else { - logger.debug("Account [name=%s] and user [name=%s] already exist in CloudStack. Executing the user update."); + logger.debug("Account [name={}] and user [name={}] already exist in CloudStack. 
Executing the user update.", account, csuser); UpdateUserCmd updateUserCmd = new UpdateUserCmd(); updateUserCmd.setId(csuser.getId()); diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmd.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmd.java index 6219fc90f81..52ece5c44f4 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmd.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmd.java @@ -91,12 +91,12 @@ public class LinkAccountToLdapCmd extends BaseCmd { .createUserAccount(admin, "", ldapUser.getFirstname(), ldapUser.getLastname(), ldapUser.getEmail(), null, admin, Account.Type.DOMAIN_ADMIN, RoleType.DomainAdmin.getId(), domainId, null, null, UUID.randomUUID().toString(), UUID.randomUUID().toString(), User.Source.LDAP); response.setAdminId(String.valueOf(userAccount.getAccountId())); - logger.info("created an account with name " + admin + " in the given domain " + domainId); + logger.info("created an account with name {} in the given domain {} with id {}", admin, _domainService.getDomain(domainId), domainId); } catch (Exception e) { - logger.info("an exception occurred while creating account with name " + admin + " in domain " + domainId, e); + logger.info("an exception occurred while creating account with name {} in domain {} with id {}", admin, _domainService.getDomain(domainId), domainId, e); } } else { - logger.debug("an account with name " + admin + " already exists in the domain " + domainId); + logger.debug("an account with name {} already exists in the domain {} with id {}", admin, _domainService.getDomain(domainId), domainId); } } else { logger.debug("ldap user with username " + admin + " is disabled in the given group/ou"); diff --git 
a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkDomainToLdapCmd.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkDomainToLdapCmd.java index d5187f99c99..c351924de6d 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkDomainToLdapCmd.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkDomainToLdapCmd.java @@ -107,12 +107,12 @@ public class LinkDomainToLdapCmd extends BaseCmd { UserAccount userAccount = _accountService.createUserAccount(admin, "", ldapUser.getFirstname(), ldapUser.getLastname(), ldapUser.getEmail(), null, admin, Account.Type.DOMAIN_ADMIN, RoleType.DomainAdmin.getId(), domainId, null, null, UUID.randomUUID().toString(), UUID.randomUUID().toString(), User.Source.LDAP); response.setAdminId(String.valueOf(userAccount.getAccountId())); - logger.info("created an account with name " + admin + " in the given domain " + domainId); + logger.info("created an account with name {} in the given domain {} with id {}", admin, _domainService.getDomain(domainId), domainId); } catch (Exception e) { - logger.info("an exception occurred while creating account with name " + admin +" in domain " + domainId, e); + logger.info("an exception occurred while creating account with name {} in domain {} with id {}", admin, _domainService.getDomain(domainId), domainId, e); } } else { - logger.debug("an account with name " + admin + " already exists in the domain " + domainId); + logger.debug("an account with name {} already exists in the domain {} with id {}", admin, _domainService.getDomain(domainId), domainId); } } else { logger.debug("ldap user with username "+admin+" is disabled in the given group/ou"); diff --git a/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmdTest.java 
b/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmdTest.java index 62a3a809b16..adf0f98f294 100644 --- a/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmdTest.java +++ b/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmdTest.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.api.command; import com.cloud.user.Account; import com.cloud.user.AccountService; +import com.cloud.user.DomainService; import com.cloud.user.User; import com.cloud.user.UserAccountVO; import org.apache.cloudstack.acl.RoleType; @@ -45,6 +46,8 @@ public class LinkAccountToLdapCmdTest implements LdapConfigurationChanger { LdapManager ldapManager; @Mock AccountService accountService; + @Mock + DomainService domainService; LinkAccountToLdapCmd linkAccountToLdapCmd; @@ -53,6 +56,7 @@ public class LinkAccountToLdapCmdTest implements LdapConfigurationChanger { linkAccountToLdapCmd = new LinkAccountToLdapCmd(); setHiddenField(linkAccountToLdapCmd, "_ldapManager", ldapManager); setHiddenField(linkAccountToLdapCmd, "_accountService", accountService); + setHiddenField(linkAccountToLdapCmd, "_domainService", domainService); } @Test diff --git a/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/api/command/LinkDomainToLdapCmdTest.java b/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/api/command/LinkDomainToLdapCmdTest.java index 67d0e770522..080347fefd3 100644 --- a/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/api/command/LinkDomainToLdapCmdTest.java +++ b/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/api/command/LinkDomainToLdapCmdTest.java @@ -18,6 +18,7 @@ package org.apache.cloudstack.api.command; import com.cloud.user.Account; import com.cloud.user.AccountService; +import com.cloud.user.DomainService; import com.cloud.user.User; import 
com.cloud.user.UserAccountVO; import org.apache.cloudstack.acl.RoleType; @@ -44,6 +45,8 @@ public class LinkDomainToLdapCmdTest implements LdapConfigurationChanger LdapManager ldapManager; @Mock AccountService accountService; + @Mock + DomainService domainService; LinkDomainToLdapCmd linkDomainToLdapCmd; @@ -52,6 +55,7 @@ public class LinkDomainToLdapCmdTest implements LdapConfigurationChanger linkDomainToLdapCmd = new LinkDomainToLdapCmd(); setHiddenField(linkDomainToLdapCmd, "_ldapManager", ldapManager); setHiddenField(linkDomainToLdapCmd, "_accountService", accountService); + setHiddenField(linkDomainToLdapCmd, "_domainService", domainService); } @After diff --git a/server/src/main/java/com/cloud/acl/AffinityGroupAccessChecker.java b/server/src/main/java/com/cloud/acl/AffinityGroupAccessChecker.java index 3a648cdcbf0..a865ff19f7b 100644 --- a/server/src/main/java/com/cloud/acl/AffinityGroupAccessChecker.java +++ b/server/src/main/java/com/cloud/acl/AffinityGroupAccessChecker.java @@ -67,10 +67,10 @@ public class AffinityGroupAccessChecker extends DomainChecker { if (!_affinityGroupService.isAffinityGroupAvailableInDomain(group.getId(), caller.getDomainId())) { DomainVO callerDomain = _domainDao.findById(caller.getDomainId()); if (callerDomain == null) { - throw new CloudRuntimeException("cannot check permission on account " + caller.getAccountName() + " whose domain does not exist"); + throw new CloudRuntimeException(String.format("cannot check permission on account %s whose domain does not exist", caller)); } - throw new PermissionDeniedException("Affinity group is not available in domain id=" + callerDomain.getUuid()); + throw new PermissionDeniedException(String.format("Affinity group is not available in domain id=%s", callerDomain)); } else { return true; } diff --git a/server/src/main/java/com/cloud/acl/DomainChecker.java b/server/src/main/java/com/cloud/acl/DomainChecker.java index e9f60ea7aa1..97832311b17 100644 --- 
a/server/src/main/java/com/cloud/acl/DomainChecker.java +++ b/server/src/main/java/com/cloud/acl/DomainChecker.java @@ -137,21 +137,21 @@ public class DomainChecker extends AdapterBase implements SecurityChecker { @Override public boolean checkAccess(Account caller, Domain domain) throws PermissionDeniedException { if (caller.getState() != Account.State.ENABLED) { - throw new PermissionDeniedException("Account " + caller.getAccountName() + " is disabled."); + throw new PermissionDeniedException(String.format("Account %s is disabled.", caller)); } if (domain == null) { - throw new PermissionDeniedException(String.format("Provided domain is NULL, cannot check access for account [uuid=%s, name=%s]", caller.getUuid(), caller.getAccountName())); + throw new PermissionDeniedException(String.format("Provided domain is NULL, cannot check access for account [%s]", caller)); } long domainId = domain.getId(); if (_accountService.isNormalUser(caller.getId())) { if (caller.getDomainId() != domainId) { - throw new PermissionDeniedException("Account " + caller.getAccountName() + " does not have permission to operate within domain id=" + domain.getUuid()); + throw new PermissionDeniedException(String.format("Account %s does not have permission to operate within domain id=%s", caller, domain.getUuid())); } } else if (!_domainDao.isChildDomain(caller.getDomainId(), domainId)) { - throw new PermissionDeniedException("Account " + caller.getAccountName() + " does not have permission to operate within domain id=" + domain.getUuid()); + throw new PermissionDeniedException(String.format("Account %s does not have permission to operate within domain id=%s", caller, domain.getUuid())); } return true; @@ -187,8 +187,7 @@ public class DomainChecker extends AdapterBase implements SecurityChecker { // account can launch a VM from this template LaunchPermissionVO permission = _launchPermissionDao.findByTemplateAndAccount(template.getId(), caller.getId()); if (permission == null) { - throw new 
PermissionDeniedException("Account " + caller.getAccountName() + - " does not have permission to launch instances from template " + template.getName()); + throw new PermissionDeniedException(String.format("Account %s does not have permission to launch instances from template %s", caller, template)); } } else { // Domain admin and regular user can delete/modify only templates created by them @@ -221,8 +220,6 @@ public class DomainChecker extends AdapterBase implements SecurityChecker { protected void validateCallerHasAccessToEntityOwner(Account caller, ControlledEntity entity, AccessType accessType) { PermissionDeniedException exception = new PermissionDeniedException("Caller does not have permission to operate with provided resource."); - String entityLog = String.format("entity [owner ID: %d, type: %s]", entity.getAccountId(), - entity.getEntityType().getSimpleName()); if (_accountService.isRootAdmin(caller.getId())) { return; @@ -233,6 +230,7 @@ public class DomainChecker extends AdapterBase implements SecurityChecker { } Account owner = _accountDao.findById(entity.getAccountId()); + String entityLog = String.format("entity [owner: %s, type: %s]", owner, entity.getEntityType().getSimpleName()); if (owner == null) { logger.error(String.format("Owner not found for %s", entityLog)); throw exception; @@ -248,20 +246,20 @@ public class DomainChecker extends AdapterBase implements SecurityChecker { // only project owner can delete/modify the project if (accessType == AccessType.ModifyProject) { if (!_projectMgr.canModifyProjectAccount(caller, owner.getId())) { - logger.error(String.format("Caller ID: %d does not have permission to modify project with " + - "owner ID: %d", caller.getId(), owner.getId())); + logger.error("Caller: {} does not have permission to modify project with " + + "owner: {}", caller, owner); throw exception; } } else if (!_projectMgr.canAccessProjectAccount(caller, owner.getId())) { - logger.error(String.format("Caller ID: %d does not have 
permission to access project with " + - "owner ID: %d", caller.getId(), owner.getId())); + logger.error("Caller: {} does not have permission to access project with " + + "owner: {}", caller, owner); throw exception; } checkOperationPermitted(caller, entity); return; } - logger.error(String.format("Caller ID: %d does not have permission to access %s", caller.getId(), entityLog)); + logger.error("Caller: {} does not have permission to access {}", caller, entityLog); throw exception; } diff --git a/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java b/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java index 99ac2492e83..4a5f80571ae 100644 --- a/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java +++ b/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java @@ -321,15 +321,14 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { } if (avoid.shouldAvoid(host)) { if (logger.isDebugEnabled()) { - logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + " is in avoid set, skipping this and trying other available hosts"); + logger.debug("Host: {} is in avoid set, skipping this and trying other available hosts", host); } continue; } //find number of guest VMs occupying capacity on this host. 
if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) { - logger.debug(() -> String.format("Adding host [%s] to the avoid set because this host already has the max number of running (user and/or system) VMs.", - ReflectionToStringBuilderUtils.reflectOnlySelectedFields(host, "uuid", "name"))); + logger.debug("Adding host [{}] to the avoid set because this host already has the max number of running (user and/or system) VMs.", host); avoid.addHost(host.getId()); continue; } @@ -337,9 +336,8 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { // Check if GPU device is required by offering and host has the availability if ((offeringDetails = _serviceOfferingDetailsDao.findDetail(serviceOfferingId, GPU.Keys.vgpuType.toString())) != null) { ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(serviceOfferingId, GPU.Keys.pciDevice.toString()); - if(!_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())){ - logger.debug(String.format("Adding host [%s] to avoid set, because this host does not have required GPU devices available.", - ReflectionToStringBuilderUtils.reflectOnlySelectedFields(host, "uuid", "name"))); + if(!_resourceMgr.isGPUDeviceAvailable(host, groupName.getValue(), offeringDetails.getValue())){ + logger.debug("Adding host [{}] to avoid set, because this host does not have required GPU devices available.", host); avoid.addHost(host.getId()); continue; } @@ -347,12 +345,13 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { Pair cpuCapabilityAndCapacity = _capacityMgr.checkIfHostHasCpuCapabilityAndCapacity(host, offering, considerReservedCapacity); if (cpuCapabilityAndCapacity.first() && cpuCapabilityAndCapacity.second()) { if (logger.isDebugEnabled()) { - logger.debug("Found a suitable host, adding to list: " + host.getId()); + logger.debug("Found a suitable host, adding to list: {}", host); } suitableHosts.add(host); } else { if 
(logger.isDebugEnabled()) { - logger.debug("Not using host " + host.getId() + "; host has cpu capability? " + cpuCapabilityAndCapacity.first() + ", host has capacity?" + cpuCapabilityAndCapacity.second()); + logger.debug("Not using host {}; host has cpu capability? {}, host has capacity?{}", + host, cpuCapabilityAndCapacity.first(), cpuCapabilityAndCapacity.second()); } avoid.addHost(host.getId()); } diff --git a/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java b/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java index 51b45a2dc98..3a2f3a86d5f 100644 --- a/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java +++ b/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java @@ -94,7 +94,7 @@ public class RecreateHostAllocator extends FirstFitRoutingAllocator { List vols = _volsDao.findByInstance(vm.getId()); VolumeVO vol = vols.get(0); long podId = vol.getPodId(); - logger.debug("Pod id determined from volume " + vol.getId() + " is " + podId); + logger.debug("Pod id determined from volume {} is {}", vol, podId); Iterator it = pcs.iterator(); while (it.hasNext()) { PodCluster pc = it.next(); diff --git a/server/src/main/java/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java b/server/src/main/java/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java index f710e5bc846..b5fb77c8179 100644 --- a/server/src/main/java/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java +++ b/server/src/main/java/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java @@ -86,8 +86,8 @@ public class UserConcentratedAllocator extends AdapterBase implements PodAllocat long zoneId = zone.getId(); List podsInZone = _podDao.listByDataCenterId(zoneId); - if (podsInZone.size() == 0) { - logger.debug("No pods found in zone " + zone.getName()); + if (podsInZone.isEmpty()) { + logger.debug("No pods 
found in zone {}", zone); return null; } @@ -111,7 +111,7 @@ public class UserConcentratedAllocator extends AdapterBase implements PodAllocat if (!enoughCapacity) { if (logger.isDebugEnabled()) { - logger.debug("Not enough RAM available in zone/pod to allocate storage for user VM (zone: " + zoneId + ", pod: " + podId + ")"); + logger.debug("Not enough RAM available in zone/pod to allocate storage for user VM (zone: {}, pod: {})", zone, pod); } continue; } @@ -121,7 +121,7 @@ public class UserConcentratedAllocator extends AdapterBase implements PodAllocat dataCenterAndPodHasEnoughCapacity(zoneId, podId, ((long)offering.getCpu() * offering.getSpeed()), Capacity.CAPACITY_TYPE_CPU, hostCandiates); if (!enoughCapacity) { if (logger.isDebugEnabled()) { - logger.debug("Not enough cpu available in zone/pod to allocate storage for user VM (zone: " + zoneId + ", pod: " + podId + ")"); + logger.debug("Not enough cpu available in zone/pod to allocate storage for user VM (zone: {}, pod: {})", zone, pod); } continue; } @@ -144,14 +144,14 @@ public class UserConcentratedAllocator extends AdapterBase implements PodAllocat } } - if (availablePods.size() == 0) { - logger.debug("There are no pods with enough memory/CPU capacity in zone " + zone.getName()); + if (availablePods.isEmpty()) { + logger.debug("There are no pods with enough memory/CPU capacity in zone {}", zone); return null; } else { // Return a random pod int next = _rand.nextInt(availablePods.size()); HostPodVO selectedPod = availablePods.get(next); - logger.debug("Found pod " + selectedPod.getName() + " in zone " + zone.getName()); + logger.debug("Found pod {} in zone {}", selectedPod, zone); return new Pair(selectedPod, podHostCandidates.get(selectedPod.getId())); } } @@ -195,7 +195,7 @@ public class UserConcentratedAllocator extends AdapterBase implements PodAllocat private boolean skipCalculation(VMInstanceVO vm) { if (vm.getState() == State.Expunging) { if (logger.isDebugEnabled()) { - logger.debug("Skip counting 
capacity for Expunging VM : " + vm.getInstanceName()); + logger.debug("Skip counting capacity for Expunging VM: {}", vm); } return true; } @@ -261,15 +261,16 @@ public class UserConcentratedAllocator extends AdapterBase implements PodAllocat usedCapacity += so.getRamSize() * 1024L * 1024L; if (logger.isDebugEnabled()) { - logger.debug("Counting memory capacity used by vm: " + vm.getId() + ", size: " + so.getRamSize() + "MB, host: " + hostId + ", currently counted: " + - toHumanReadableSize(usedCapacity) + " Bytes"); + logger.debug("Counting memory capacity used by vm: {}, size: {}MB, " + + "host: {}, currently counted: {} Bytes", + vm, so.getRamSize(), hostId, toHumanReadableSize(usedCapacity)); } } else if (capacityType == Capacity.CAPACITY_TYPE_CPU) { usedCapacity += so.getCpu() * so.getSpeed(); if (logger.isDebugEnabled()) { - logger.debug("Counting cpu capacity used by vm: " + vm.getId() + ", cpu: " + so.getCpu() + ", speed: " + so.getSpeed() + ", currently counted: " + - usedCapacity + " Bytes"); + logger.debug("Counting cpu capacity used by vm: {}, cpu: {}, speed: {}, " + + "currently counted: {} Bytes", vm, so.getCpu(), so.getSpeed(), usedCapacity); } } } diff --git a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java index 4c4f08f12bd..a9e66c6aece 100644 --- a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java +++ b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java @@ -33,6 +33,9 @@ import javax.inject.Inject; import javax.mail.MessagingException; import javax.naming.ConfigurationException; +import com.cloud.dc.DataCenter; +import com.cloud.dc.Pod; +import com.cloud.org.Cluster; import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; @@ -672,7 +675,7 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi 
logger.debug(msgSubject); logger.debug(msgContent); } - sendAlert(alertType, dc.getId(), podId, clusterId, msgSubject, msgContent); + sendAlert(alertType, dc, pod, cluster, msgSubject, msgContent); } catch (Exception ex) { logger.error("Exception in CapacityChecker", ex); } @@ -723,15 +726,25 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi public void sendAlert(AlertType alertType, long dataCenterId, Long podId, Long clusterId, String subject, String content) throws MessagingException, UnsupportedEncodingException { - logger.warn(String.format("alertType=[%s] dataCenterId=[%s] podId=[%s] clusterId=[%s] message=[%s].", alertType, dataCenterId, podId, clusterId, subject)); + DataCenterVO zone = _dcDao.findById(dataCenterId); + HostPodVO pod = podId == null ? null : _podDao.findById(podId); + ClusterVO cluster = clusterId == null ? null : _clusterDao.findById(clusterId); + sendAlert(alertType, zone, pod, cluster, subject, content); + } + + public void sendAlert(AlertType alertType, DataCenter dataCenter, Pod pod, Cluster cluster, String subject, String content) + throws MessagingException, UnsupportedEncodingException { + logger.warn(String.format("alertType=[%s] dataCenter=[%s] pod=[%s] cluster=[%s] message=[%s].", alertType, dataCenter, pod, cluster, subject)); AlertVO alert = null; + Long clusterId = cluster == null ? null : cluster.getId(); + Long podId = pod == null ? 
null : pod.getId(); if ((alertType != AlertManager.AlertType.ALERT_TYPE_HOST) && (alertType != AlertManager.AlertType.ALERT_TYPE_USERVM) && (alertType != AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER) && (alertType != AlertManager.AlertType.ALERT_TYPE_CONSOLE_PROXY) && (alertType != AlertManager.AlertType.ALERT_TYPE_SSVM) && (alertType != AlertManager.AlertType.ALERT_TYPE_STORAGE_MISC) && (alertType != AlertManager.AlertType.ALERT_TYPE_MANAGEMENT_NODE) && (alertType != AlertManager.AlertType.ALERT_TYPE_RESOURCE_LIMIT_EXCEEDED) && (alertType != AlertManager.AlertType.ALERT_TYPE_UPLOAD_FAILED) && (alertType != AlertManager.AlertType.ALERT_TYPE_OOBM_AUTH_ERROR) && (alertType != AlertManager.AlertType.ALERT_TYPE_HA_ACTION) && (alertType != AlertManager.AlertType.ALERT_TYPE_CA_CERT)) { - alert = _alertDao.getLastAlert(alertType.getType(), dataCenterId, podId, clusterId); + alert = _alertDao.getLastAlert(alertType.getType(), dataCenter.getId(), podId, clusterId); } if (alert == null) { @@ -741,7 +754,7 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi newAlert.setContent(content); newAlert.setClusterId(clusterId); newAlert.setPodId(podId); - newAlert.setDataCenterId(dataCenterId); + newAlert.setDataCenterId(dataCenter.getId()); newAlert.setSentCount(1); newAlert.setLastSent(new Date()); newAlert.setName(alertType.getName()); diff --git a/server/src/main/java/com/cloud/alert/ClusterAlertAdapter.java b/server/src/main/java/com/cloud/alert/ClusterAlertAdapter.java index cc993445c23..ae884ddc4d1 100644 --- a/server/src/main/java/com/cloud/alert/ClusterAlertAdapter.java +++ b/server/src/main/java/com/cloud/alert/ClusterAlertAdapter.java @@ -58,14 +58,14 @@ public class ClusterAlertAdapter extends AdapterBase implements AlertAdapter { private void onClusterNodeJoined(Object sender, ClusterNodeJoinEventArgs args) { if (logger.isDebugEnabled()) { for (ManagementServerHostVO mshost : args.getJoinedNodes()) { - logger.debug("Handle cluster node 
join alert, joined node: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid()); + logger.debug("Handle cluster node join alert, joined node: {} ({})", mshost.getServiceIP(), mshost); } } for (ManagementServerHostVO mshost : args.getJoinedNodes()) { if (mshost.getId() == args.getSelf().longValue()) { if (logger.isDebugEnabled()) { - logger.debug("Management server node " + mshost.getServiceIP() + " is up, send alert"); + logger.debug("Management server node {} ({}) is up, send alert", mshost.getServiceIP(), mshost); } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_MANAGEMENT_NODE, 0, new Long(0), "Management server node " + mshost.getServiceIP() + " is up", ""); @@ -78,7 +78,7 @@ public class ClusterAlertAdapter extends AdapterBase implements AlertAdapter { if (logger.isDebugEnabled()) { for (ManagementServerHostVO mshost : args.getLeftNodes()) { - logger.debug("Handle cluster node left alert, leaving node: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid()); + logger.debug("Handle cluster node left alert, leaving node: {} ({})", mshost.getServiceIP(), mshost); } } @@ -86,13 +86,13 @@ public class ClusterAlertAdapter extends AdapterBase implements AlertAdapter { if (mshost.getId() != args.getSelf().longValue()) { if (_mshostDao.increaseAlertCount(mshost.getId()) > 0) { if (logger.isDebugEnabled()) { - logger.debug("Detected management server node " + mshost.getServiceIP() + " is down, send alert"); + logger.debug("Detected management server node {} ({}) is down, send alert", mshost.getServiceIP(), mshost); } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_MANAGEMENT_NODE, 0, new Long(0), "Management server node " + mshost.getServiceIP() + " is down", ""); } else { if (logger.isDebugEnabled()) { - logger.debug("Detected management server node " + mshost.getServiceIP() + " is down, but alert has already been set"); + logger.debug("Detected management server node {} ({}) is down, but alert has already been set", mshost.getServiceIP(), mshost); 
} } } diff --git a/server/src/main/java/com/cloud/api/ApiServer.java b/server/src/main/java/com/cloud/api/ApiServer.java index 98f87dfc3f0..824d60eec81 100644 --- a/server/src/main/java/com/cloud/api/ApiServer.java +++ b/server/src/main/java/com/cloud/api/ApiServer.java @@ -1036,8 +1036,8 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer final Account account = userAcctPair.second(); if (user.getState() != Account.State.ENABLED || !account.getState().equals(Account.State.ENABLED)) { - logger.info("disabled or locked user accessing the api, userid = " + user.getId() + "; name = " + user.getUsername() + "; state: " + user.getState() + - "; accountState: " + account.getState()); + logger.info("disabled or locked user accessing the api, user = {} (state: {}); " + + "account: {} (state: {})", user, user.getState(), account, account.getState()); return false; } @@ -1052,7 +1052,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer // verify secret key exists secretKey = user.getSecretKey(); if (secretKey == null) { - logger.info("User does not have a secret key associated with the account -- ignoring request, username: " + user.getUsername()); + logger.info("User does not have a secret key associated with the account -- ignoring request, username: {}", user); return false; } @@ -1097,7 +1097,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer throw new ServerApiException(ApiErrorCode.UNAUTHORIZED , errorMessage); } catch (final OriginDeniedException ex) { // in this case we can remove the session with extreme prejudice - final String errorMessage = "The user '" + user.getUsername() + "' is not allowed to execute commands from ip address '" + remoteAddress.getHostName() + "'."; + final String errorMessage = String.format("The user '%s' is not allowed to execute commands from ip address '%s'.", user, remoteAddress.getHostName()); logger.debug(errorMessage); return false; } 
@@ -1278,7 +1278,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer if ((user == null) || (user.getRemoved() != null) || !user.getState().equals(Account.State.ENABLED) || (account == null) || !account.getState().equals(Account.State.ENABLED)) { - logger.warn("Deleted/Disabled/Locked user with id=" + userId + " attempting to access public API"); + logger.warn("Deleted/Disabled/Locked user [{} account={}] with id={} attempting to access public API", user, account, userId); return false; } return true; diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index 976d3817a0a..570d1be814d 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -4334,7 +4334,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q List domainIds = new ArrayList(); DomainVO domainRecord = _domainDao.findById(account.getDomainId()); if (domainRecord == null) { - logger.error("Could not find the domainId for account:" + account.getAccountName()); + logger.error("Could not find the domainId for account: {}", account); throw new CloudAuthenticationException("Could not find the domainId for account:" + account.getAccountName()); } domainIds.add(domainRecord.getId()); @@ -4374,7 +4374,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q List domainIds = new ArrayList(); DomainVO domainRecord = _domainDao.findById(account.getDomainId()); if (domainRecord == null) { - logger.error("Could not find the domainId for account:" + account.getAccountName()); + logger.error("Could not find the domainId for account: {}", account); throw new CloudAuthenticationException("Could not find the domainId for account:" + account.getAccountName()); } domainIds.add(domainRecord.getId()); @@ -4604,13 +4604,13 @@ public class QueryManagerImpl 
extends MutualExclusiveIdsManagerBase implements Q throw new InvalidParameterValueException("Please specify a valid template ID."); }// If ISO requested then it should be ISO. if (isIso && template.getFormat() != ImageFormat.ISO) { - logger.error("Template Id " + templateId + " is not an ISO"); + logger.error("Template {} is not an ISO", template); InvalidParameterValueException ex = new InvalidParameterValueException("Specified Template Id is not an ISO"); ex.addProxyObject(template.getUuid(), "templateId"); throw ex; }// If ISO not requested then it shouldn't be an ISO. if (!isIso && template.getFormat() == ImageFormat.ISO) { - logger.error("Incorrect format of the template id " + templateId); + logger.error("Incorrect format of the template: {}", template); InvalidParameterValueException ex = new InvalidParameterValueException("Incorrect format " + template.getFormat() + " of the specified template id"); ex.addProxyObject(template.getUuid(), "templateId"); throw ex; diff --git a/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDaoImpl.java index 42a35f4412c..04816ec87b9 100644 --- a/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDaoImpl.java @@ -88,7 +88,7 @@ public class SnapshotJoinDaoImpl extends GenericDaoBaseWithTagInformation) status -> { - LOGGER.debug(String.format("Persisting AS Number Range %s-%s for the zone %s", startASNumber, endASNumber, zone.getName())); + LOGGER.debug("Persisting AS Number Range {}-{} for the zone {}", startASNumber, endASNumber, zone); ASNumberRangeVO asNumberRangeVO = new ASNumberRangeVO(zoneId, startASNumber, endASNumber); asNumberRangeDao.persist(asNumberRangeVO); for (long asn = startASNumber; asn <= endASNumber; asn++) { - LOGGER.debug(String.format("Persisting AS Number %s for zone %s", asn, zone.getName())); + LOGGER.debug("Persisting AS Number {} for zone 
{}", asn, zone); ASNumberVO asNumber = new ASNumberVO(asn, asNumberRangeVO.getId(), zoneId); asNumberDao.persist(asNumber); } return asNumberRangeVO; }); } catch (Exception e) { - String err = String.format("Error creating AS Number range %s-%s for zone %s: %s", startASNumber, endASNumber, zone.getName(), e.getMessage()); + String err = String.format("Error creating AS Number range %s-%s for zone %s: %s", startASNumber, endASNumber, zone, e.getMessage()); LOGGER.error(err, e); throw new CloudRuntimeException(err); } @@ -207,8 +207,8 @@ public class BGPServiceImpl implements BGPService { throw new InvalidParameterException(String.format("Failed to find network with ID: %s", networkId)); } if (network.getVpcId() != null) { - LOGGER.debug(String.format("The network %s is a VPC tier, searching for the AS number on the VPC with ID %s", - network.getName(), network.getVpcId())); + LOGGER.debug("The network {} is a VPC tier, searching for the AS number on the VPC {}", + network::toString, () -> vpcDao.findById(network.getVpcId())); networkSearchId = null; vpcSerchId = network.getVpcId(); } @@ -226,15 +226,17 @@ public class BGPServiceImpl implements BGPService { asNumberDao.findOneByAllocationStateAndZone(zoneId, false); if (asNumberVO == null || asNumberVO.getDataCenterId() != zoneId) { if (asNumber != null) { - LOGGER.error(String.format("Cannot find AS number %s in zone with ID %s", asNumber, zoneId)); + LOGGER.error("Cannot find AS number {} in zone {} with id {}", asNumber, dataCenterDao.findById(zoneId), zoneId); return false; } throw new CloudRuntimeException(String.format("Cannot allocate AS number in zone with ID %s", zoneId)); } long accountId, domainId; String netName; + VpcVO vpc = null; + NetworkVO network = null; if (Objects.nonNull(vpcId)) { - VpcVO vpc = vpcDao.findById(vpcId); + vpc = vpcDao.findById(vpcId); if (vpc == null) { LOGGER.error(String.format("Cannot find VPC with ID %s", vpcId)); return false; @@ -243,7 +245,7 @@ public class BGPServiceImpl 
implements BGPService { domainId = vpc.getDomainId(); netName = vpc.getName(); } else { - NetworkVO network = networkDao.findById(networkId); + network = networkDao.findById(networkId); if (network == null) { LOGGER.error(String.format("Cannot find network with ID %s", networkId)); return false; @@ -253,8 +255,9 @@ public class BGPServiceImpl implements BGPService { netName = network.getName(); } - LOGGER.debug(String.format("Allocating the AS Number %s to %s %s on zone %s", asNumber, - (Objects.nonNull(vpcId) ? "VPC" : "network"), netName, zoneId)); + LOGGER.debug("Allocating the AS Number {} to {} on zone {}", asNumber::toString, + (Objects.nonNull(vpcId) ? "VPC " + vpc : "network " + network)::toString, + () -> dataCenterDao.findById(zoneId)); asNumberVO.setAllocated(true); asNumberVO.setAllocatedTime(new Date()); if (Objects.nonNull(vpcId)) { @@ -291,11 +294,12 @@ public class BGPServiceImpl implements BGPService { @ActionEvent(eventType = EventTypes.EVENT_AS_NUMBER_RELEASE, eventDescription = "Releasing AS Number") public Pair releaseASNumber(long zoneId, long asNumber, boolean isDestroyNetworkOperation) { ASNumberVO asNumberVO = asNumberDao.findByAsNumber(asNumber); + DataCenterVO zone = dataCenterDao.findById(zoneId); if (asNumberVO == null) { - return logAndReturnErrorMessage(String.format("Cannot find AS Number %s on zone %s", asNumber, zoneId)); + return logAndReturnErrorMessage(String.format("Cannot find AS Number %s on zone %s", asNumber, zone)); } if (!asNumberVO.isAllocated()) { - LOGGER.debug(String.format("The AS Number %s is not allocated to any network on zone %s, ignoring release", asNumber, zoneId)); + LOGGER.debug("The AS Number {} is not allocated to any network on zone {}, ignoring release", asNumber, zone); return new Pair<>(true, ""); } Long networkId = asNumberVO.getNetworkId(); @@ -306,7 +310,7 @@ public class BGPServiceImpl implements BGPService { return checksResult; } } - LOGGER.debug(String.format("Releasing AS Number %s on zone %s 
from previous allocation", asNumber, zoneId)); + LOGGER.debug("Releasing AS Number {} on zone {} from previous allocation", asNumber, zone); asNumberVO.setAllocated(false); asNumberVO.setAllocatedTime(null); asNumberVO.setDomainId(null); @@ -361,6 +365,7 @@ public class BGPServiceImpl implements BGPService { long startASNumber = asRange.getStartASNumber(); long endASNumber = asRange.getEndASNumber(); long zoneId = asRange.getDataCenterId(); + DataCenterVO zone = dataCenterDao.findById(zoneId); List allocatedAsNumbers = asNumberDao.listAllocatedByASRange(asRange.getId()); if (Objects.nonNull(allocatedAsNumbers) && !allocatedAsNumbers.isEmpty()) { throw new CloudRuntimeException(String.format("There are %s AS numbers in use from the range %s-%s, cannot remove the range", @@ -374,13 +379,12 @@ public class BGPServiceImpl implements BGPService { LOGGER.debug(String.format("Removed %s AS numbers from the range %s-%s", removedASNumbers, startASNumber, endASNumber)); asNumberRangeDao.remove(id); - LOGGER.debug(String.format("Removing the AS Number Range %s-%s for the zone %s", startASNumber, - endASNumber, zoneId)); + LOGGER.debug("Removing the AS Number Range {}-{} for the zone {}", startASNumber, endASNumber, zone); } }); } catch (Exception e) { String err = String.format("Error removing AS Number range %s-%s for zone %s: %s", - startASNumber, endASNumber, zoneId, e.getMessage()); + startASNumber, endASNumber, zone, e.getMessage()); LOGGER.error(err, e); throw new CloudRuntimeException(err); } diff --git a/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java b/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java index 08f055ca3a3..732c78b775e 100644 --- a/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java +++ b/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java @@ -170,21 +170,21 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, if (hostId == null) { return true; } + HostVO host = 
_hostDao.findById(hostId); + return releaseVmCapacity(vm, moveFromReserved, moveToReservered, host); + } + + @DB + public boolean releaseVmCapacity(VirtualMachine vm, final boolean moveFromReserved, final boolean moveToReservered, final Host host) { + if (host == null) { + return true; + } final ServiceOfferingVO svo = _offeringsDao.findById(vm.getId(), vm.getServiceOfferingId()); - CapacityVO capacityCpu = _capacityDao.findByHostIdType(hostId, Capacity.CAPACITY_TYPE_CPU); - CapacityVO capacityMemory = _capacityDao.findByHostIdType(hostId, Capacity.CAPACITY_TYPE_MEMORY); - CapacityVO capacityCpuCore = _capacityDao.findByHostIdType(hostId, Capacity.CAPACITY_TYPE_CPU_CORE); - Long clusterId = null; - if (hostId != null) { - HostVO host = _hostDao.findById(hostId); - if (host == null) { - logger.warn("Host " + hostId + " no long exist anymore!"); - return true; - } - - clusterId = host.getClusterId(); - } + CapacityVO capacityCpu = _capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_CPU); + CapacityVO capacityMemory = _capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_MEMORY); + CapacityVO capacityCpuCore = _capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_CPU_CORE); + Long clusterId = host.getClusterId(); if (capacityCpu == null || capacityMemory == null || svo == null || capacityCpuCore == null) { return false; } @@ -255,13 +255,13 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, } } - logger.debug("release cpu from host: " + hostId + ", old used: " + usedCpu + ",reserved: " + reservedCpu + ", actual total: " + actualTotalCpu + - ", total with overprovisioning: " + totalCpu + "; new used: " + capacityCpu.getUsedCapacity() + ",reserved:" + capacityCpu.getReservedCapacity() + - "; movedfromreserved: " + moveFromReserved + ",moveToReservered" + moveToReservered); + logger.debug("release cpu from host: {}, old used: {}, " + + "reserved: {}, actual total: {}, total with overprovisioning: 
{}; " + + "new used: {},reserved:{}; movedfromreserved: {},moveToReservered: {}", host, usedCpu, reservedCpu, actualTotalCpu, totalCpu, capacityCpu.getUsedCapacity(), capacityCpu.getReservedCapacity(), moveFromReserved, moveToReservered); - logger.debug("release mem from host: " + hostId + ", old used: " + toHumanReadableSize(usedMem) + ",reserved: " + toHumanReadableSize(reservedMem) + ", total: " + toHumanReadableSize(totalMem) + "; new used: " + - toHumanReadableSize(capacityMemory.getUsedCapacity()) + ",reserved:" + toHumanReadableSize(capacityMemory.getReservedCapacity()) + "; movedfromreserved: " + moveFromReserved + - ",moveToReservered" + moveToReservered); + logger.debug("release mem from host: {}, old used: {}, " + + "reserved: {}, total: {}; new used: {}, reserved: {}; " + + "movedfromreserved: {}, moveToReservered: {}", host, toHumanReadableSize(usedMem), toHumanReadableSize(reservedMem), toHumanReadableSize(totalMem), toHumanReadableSize(capacityMemory.getUsedCapacity()), toHumanReadableSize(capacityMemory.getReservedCapacity()), moveFromReserved, moveToReservered); _capacityDao.update(capacityCpu.getId(), capacityCpu); _capacityDao.update(capacityMemory.getId(), capacityMemory); @@ -280,7 +280,6 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, @Override public void allocateVmCapacity(VirtualMachine vm, final boolean fromLastHost) { - final long vmId = vm.getId(); final long hostId = vm.getHostId(); final HostVO host = _hostDao.findById(hostId); final long clusterId = host.getClusterId(); @@ -332,9 +331,9 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, long freeMem = totalMem - (reservedMem + usedMem); if (logger.isDebugEnabled()) { - logger.debug("We are allocating VM, increasing the used capacity of this host:" + hostId); - logger.debug("Current Used CPU: " + usedCpu + " , Free CPU:" + freeCpu + " ,Requested CPU: " + cpu); - logger.debug("Current Used RAM: " + 
toHumanReadableSize(usedMem) + " , Free RAM:" + toHumanReadableSize(freeMem) + " ,Requested RAM: " + toHumanReadableSize(ram)); + logger.debug("We are allocating VM, increasing the used capacity of this host:{}", host); + logger.debug("Current Used CPU: {} , Free CPU:{} ,Requested CPU: {}", usedCpu, freeCpu, cpu); + logger.debug("Current Used RAM: {} , Free RAM:{} ,Requested RAM: {}", toHumanReadableSize(usedMem), toHumanReadableSize(freeMem), toHumanReadableSize(ram)); } capacityCpu.setUsedCapacity(usedCpu + cpu); capacityMem.setUsedCapacity(usedMem + ram); @@ -362,13 +361,19 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, } } - logger.debug("CPU STATS after allocation: for host: " + hostId + ", old used: " + usedCpu + ", old reserved: " + reservedCpu + ", actual total: " + - actualTotalCpu + ", total with overprovisioning: " + totalCpu + "; new used:" + capacityCpu.getUsedCapacity() + ", reserved:" + - capacityCpu.getReservedCapacity() + "; requested cpu:" + cpu + ",alloc_from_last:" + fromLastHost); + logger.debug(String.format("CPU STATS after allocation: for host: %s, " + + "old used: %d, old reserved: %d, actual total: %d, " + + "total with overprovisioning: %d; new used: %d, reserved: %d; " + + "requested cpu: %d, alloc_from_last: %s", + host, usedCpu, reservedCpu, actualTotalCpu, totalCpu, + capacityCpu.getUsedCapacity(), capacityCpu.getReservedCapacity(), cpu, fromLastHost)); - logger.debug("RAM STATS after allocation: for host: " + hostId + ", old used: " + toHumanReadableSize(usedMem) + ", old reserved: " + toHumanReadableSize(reservedMem) + ", total: " + - toHumanReadableSize(totalMem) + "; new used: " + toHumanReadableSize(capacityMem.getUsedCapacity()) + ", reserved: " + toHumanReadableSize(capacityMem.getReservedCapacity()) + "; requested mem: " + toHumanReadableSize(ram) + - ",alloc_from_last:" + fromLastHost); + logger.debug("RAM STATS after allocation: for host: {}, " + + "old used: {}, old reserved: {}, total: 
{}; new used: {}, reserved: {}; " + + "requested mem: {}, alloc_from_last: {}", + host, toHumanReadableSize(usedMem), toHumanReadableSize(reservedMem), + toHumanReadableSize(totalMem), toHumanReadableSize(capacityMem.getUsedCapacity()), + toHumanReadableSize(capacityMem.getReservedCapacity()), toHumanReadableSize(ram), fromLastHost); long cluster_id = host.getClusterId(); ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio"); @@ -381,15 +386,15 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, if (hostHasCpuCapability) { // first check from reserved capacity - hostHasCapacity = checkIfHostHasCapacity(host.getId(), cpu, ram, true, cpuOvercommitRatio, memoryOvercommitRatio, true); + hostHasCapacity = checkIfHostHasCapacity(host, cpu, ram, true, cpuOvercommitRatio, memoryOvercommitRatio, true); // if not reserved, check the free capacity if (!hostHasCapacity) - hostHasCapacity = checkIfHostHasCapacity(host.getId(), cpu, ram, false, cpuOvercommitRatio, memoryOvercommitRatio, true); + hostHasCapacity = checkIfHostHasCapacity(host, cpu, ram, false, cpuOvercommitRatio, memoryOvercommitRatio, true); } if (!hostHasCapacity || !hostHasCpuCapability) { - throw new CloudRuntimeException("Host does not have enough capacity for vm " + vmId); + throw new CloudRuntimeException("Host does not have enough capacity for vm " + vm); } _capacityDao.update(capacityCpu.getId(), capacityCpu); @@ -415,41 +420,40 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, boolean isCpuSpeedGood = host.getSpeed().intValue() >= cpuSpeed; if (isCpuNumGood && isCpuSpeedGood) { if (logger.isDebugEnabled()) { - logger.debug("Host: " + hostId + " has cpu capability (cpu:" + host.getCpus() + ", speed:" + host.getSpeed() + - ") to support requested CPU: " + cpuNum + " and requested speed: " + cpuSpeed); + logger.debug("Host: {} has cpu capability (cpu:{}, speed:{}) " + + "to support requested 
CPU: {} and requested speed: {}", host, host.getCpus(), host.getSpeed(), cpuNum, cpuSpeed); } return true; } else { if (logger.isDebugEnabled()) { - logger.debug("Host: " + hostId + " doesn't have cpu capability (cpu:" + host.getCpus() + ", speed:" + host.getSpeed() + - ") to support requested CPU: " + cpuNum + " and requested speed: " + cpuSpeed); + logger.debug("Host: {} doesn't have cpu capability (cpu:{}, speed:{})" + + " to support requested CPU: {} and requested speed: {}", host, host.getCpus(), host.getSpeed(), cpuNum, cpuSpeed); } return false; } } @Override - public boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOvercommitRatio, float memoryOvercommitRatio, + public boolean checkIfHostHasCapacity(Host host, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOvercommitRatio, float memoryOvercommitRatio, boolean considerReservedCapacity) { boolean hasCapacity = false; if (logger.isDebugEnabled()) { - logger.debug("Checking if host: " + hostId + " has enough capacity for requested CPU: " + cpu + " and requested RAM: " + toHumanReadableSize(ram) + - " , cpuOverprovisioningFactor: " + cpuOvercommitRatio); + logger.debug(String.format("Checking if host: %s has enough capacity for requested CPU: %d and requested RAM: %s , cpuOverprovisioningFactor: %s", host, cpu, toHumanReadableSize(ram), cpuOvercommitRatio)); } - CapacityVO capacityCpu = _capacityDao.findByHostIdType(hostId, Capacity.CAPACITY_TYPE_CPU); - CapacityVO capacityMem = _capacityDao.findByHostIdType(hostId, Capacity.CAPACITY_TYPE_MEMORY); + CapacityVO capacityCpu = _capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_CPU); + CapacityVO capacityMem = _capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_MEMORY); if (capacityCpu == null || capacityMem == null) { if (capacityCpu == null) { if (logger.isDebugEnabled()) { - logger.debug("Cannot checkIfHostHasCapacity, Capacity entry for CPU not found 
in Db, for hostId: " + hostId); + logger.debug("Cannot checkIfHostHasCapacity, Capacity entry for CPU not found in Db, for host: {}", host); } } if (capacityMem == null) { if (logger.isDebugEnabled()) { - logger.debug("Cannot checkIfHostHasCapacity, Capacity entry for RAM not found in Db, for hostId: " + hostId); + logger.debug("Cannot checkIfHostHasCapacity, Capacity entry for RAM not found in Db, for host: {}", host); } } @@ -524,21 +528,15 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, logger.debug("Host has enough CPU and RAM available"); } - logger.debug("STATS: Can alloc CPU from host: " + hostId + ", used: " + usedCpu + ", reserved: " + reservedCpu + ", actual total: " + actualTotalCpu + - ", total with overprovisioning: " + totalCpu + "; requested cpu:" + cpu + ",alloc_from_last_host?:" + checkFromReservedCapacity + - " ,considerReservedCapacity?: " + considerReservedCapacity); + logger.debug("STATS: Can alloc CPU from host: {}, used: {}, reserved: {}, actual total: {}, total with overprovisioning: {}; requested cpu: {}, alloc_from_last_host?: {}, considerReservedCapacity?: {}", host, usedCpu, reservedCpu, actualTotalCpu, totalCpu, cpu, checkFromReservedCapacity, considerReservedCapacity); - logger.debug("STATS: Can alloc MEM from host: " + hostId + ", used: " + toHumanReadableSize(usedMem) + ", reserved: " + toHumanReadableSize(reservedMem) + ", total: " + toHumanReadableSize(totalMem) + - "; requested mem: " + toHumanReadableSize(ram) + ", alloc_from_last_host?: " + checkFromReservedCapacity + " , considerReservedCapacity?: " + considerReservedCapacity); + logger.debug("STATS: Can alloc MEM from host: {}, used: {}, reserved: {}, total: {}; requested mem: {}, alloc_from_last_host?: {}, considerReservedCapacity?: {}", host, toHumanReadableSize(usedMem), toHumanReadableSize(reservedMem), toHumanReadableSize(totalMem), toHumanReadableSize(ram), checkFromReservedCapacity, considerReservedCapacity); } else { if 
(checkFromReservedCapacity) { - logger.debug("STATS: Failed to alloc resource from host: " + hostId + " reservedCpu: " + reservedCpu + ", requested cpu: " + cpu + ", reservedMem: " + - toHumanReadableSize(reservedMem) + ", requested mem: " + toHumanReadableSize(ram)); + logger.debug("STATS: Failed to alloc resource from host: {} reservedCpu: {}, requested cpu: {}, reservedMem: {}, requested mem: {}", host, reservedCpu, cpu, toHumanReadableSize(reservedMem), toHumanReadableSize(ram)); } else { - logger.debug("STATS: Failed to alloc resource from host: " + hostId + ", reservedCpu: " + reservedCpu + ", used cpu: " + usedCpu + ", requested cpu: " + - cpu + ", actual total cpu: " + actualTotalCpu + ", total cpu with overprovisioning: " + totalCpu + ", reservedMem: " + toHumanReadableSize(reservedMem) + ", used Mem: " + - toHumanReadableSize(usedMem) + ", requested mem: " + toHumanReadableSize(ram) + ", total Mem:" + toHumanReadableSize(totalMem) + " ,considerReservedCapacity?: " + considerReservedCapacity); + logger.debug("STATS: Failed to alloc resource from host: {}, reservedCpu: {}, used cpu: {}, requested cpu: {}, actual total cpu: {}, total cpu with overprovisioning: {}, reservedMem: {}, used Mem: {}, requested mem: {}, total Mem: {}, considerReservedCapacity?: {}", host, reservedCpu, usedCpu, cpu, actualTotalCpu, totalCpu, toHumanReadableSize(reservedMem), toHumanReadableSize(usedMem), toHumanReadableSize(ram), toHumanReadableSize(totalMem), considerReservedCapacity); } if (logger.isDebugEnabled()) { @@ -655,12 +653,12 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, List vms = _vmDao.listUpByHostId(host.getId()); if (logger.isDebugEnabled()) { - logger.debug("Found " + vms.size() + " VMs on host " + host.getId()); + logger.debug("Found {} VMs on host {}", vms.size(), host); } final List vosMigrating = _vmDao.listVmsMigratingFromHost(host.getId()); if (logger.isDebugEnabled()) { - logger.debug("Found " + vosMigrating.size() + " 
VMs are Migrating from host " + host.getId()); + logger.debug("Found {} VMs are Migrating from host {}", vosMigrating.size(), host); } vms.addAll(vosMigrating); @@ -705,7 +703,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, List vmsByLastHostId = _vmDao.listByLastHostId(host.getId()); if (logger.isDebugEnabled()) { - logger.debug("Found " + vmsByLastHostId.size() + " VM, not running on host " + host.getId()); + logger.debug("Found {} VM, not running on host {}", vmsByLastHostId.size(), host); } for (VMInstanceVO vm : vmsByLastHostId) { Float cpuOvercommitRatio = 1.0f; @@ -771,31 +769,27 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, long hostTotalCpuCore = host.getCpus().longValue(); if (cpuCoreCap.getTotalCapacity() != hostTotalCpuCore) { - logger.debug("Calibrate total cpu for host: " + host.getId() + " old total CPU:" - + cpuCoreCap.getTotalCapacity() + " new total CPU:" + hostTotalCpuCore); + logger.debug("Calibrate total cpu for host: {} old total CPU:{} new total CPU:{}", host, cpuCoreCap.getTotalCapacity(), hostTotalCpuCore); cpuCoreCap.setTotalCapacity(hostTotalCpuCore); } if (cpuCoreCap.getUsedCapacity() == usedCpuCore && cpuCoreCap.getReservedCapacity() == reservedCpuCore) { - logger.debug("No need to calibrate cpu capacity, host:" + host.getId() + " usedCpuCore: " + cpuCoreCap.getUsedCapacity() - + " reservedCpuCore: " + cpuCoreCap.getReservedCapacity()); + logger.debug("No need to calibrate cpu capacity, host:{} usedCpuCore: {} reservedCpuCore: {}", host, cpuCoreCap.getUsedCapacity(), cpuCoreCap.getReservedCapacity()); } else { if (cpuCoreCap.getReservedCapacity() != reservedCpuCore) { - logger.debug("Calibrate reserved cpu core for host: " + host.getId() + " old reservedCpuCore:" - + cpuCoreCap.getReservedCapacity() + " new reservedCpuCore:" + reservedCpuCore); + logger.debug("Calibrate reserved cpu core for host: {} old reservedCpuCore: {} new reservedCpuCore: {}", host, 
cpuCoreCap.getReservedCapacity(), reservedCpuCore); cpuCoreCap.setReservedCapacity(reservedCpuCore); } if (cpuCoreCap.getUsedCapacity() != usedCpuCore) { - logger.debug("Calibrate used cpu core for host: " + host.getId() + " old usedCpuCore:" - + cpuCoreCap.getUsedCapacity() + " new usedCpuCore:" + usedCpuCore); + logger.debug("Calibrate used cpu core for host: {} old usedCpuCore: {} new usedCpuCore: {}", host, cpuCoreCap.getUsedCapacity(), usedCpuCore); cpuCoreCap.setUsedCapacity(usedCpuCore); } } try { _capacityDao.update(cpuCoreCap.getId(), cpuCoreCap); } catch (Exception e) { - logger.error("Caught exception while updating cpucore capacity for the host " +host.getId(), e); + logger.error("Caught exception while updating cpucore capacity for the host {}", host, e); } } else { final long usedCpuCoreFinal = usedCpuCore; @@ -819,51 +813,46 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, long hostTotalCpu = host.getCpus().longValue() * host.getSpeed().longValue(); if (cpuCap.getTotalCapacity() != hostTotalCpu) { - logger.debug("Calibrate total cpu for host: " + host.getId() + " old total CPU:" + cpuCap.getTotalCapacity() + " new total CPU:" + hostTotalCpu); + logger.debug("Calibrate total cpu for host: {} old total CPU:{} new total CPU:{}", host, cpuCap.getTotalCapacity(), hostTotalCpu); cpuCap.setTotalCapacity(hostTotalCpu); } // Set the capacity state as per the host allocation state. 
if(capacityState != cpuCap.getCapacityState()){ - logger.debug("Calibrate cpu capacity state for host: " + host.getId() + " old capacity state:" + cpuCap.getTotalCapacity() + " new capacity state:" + hostTotalCpu); + logger.debug("Calibrate cpu capacity state for host: {} old capacity state:{} new capacity state:{}", host, cpuCap.getTotalCapacity(), hostTotalCpu); cpuCap.setCapacityState(capacityState); } memCap.setCapacityState(capacityState); if (cpuCap.getUsedCapacity() == usedCpu && cpuCap.getReservedCapacity() == reservedCpu) { - logger.debug("No need to calibrate cpu capacity, host:" + host.getId() + " usedCpu: " + cpuCap.getUsedCapacity() + " reservedCpu: " + - cpuCap.getReservedCapacity()); + logger.debug("No need to calibrate cpu capacity, host:{} usedCpu: {} reservedCpu: {}", host, cpuCap.getUsedCapacity(), cpuCap.getReservedCapacity()); } else { if (cpuCap.getReservedCapacity() != reservedCpu) { - logger.debug("Calibrate reserved cpu for host: " + host.getId() + " old reservedCpu:" + cpuCap.getReservedCapacity() + " new reservedCpu:" + - reservedCpu); + logger.debug("Calibrate reserved cpu for host: {} old reservedCpu:{} new reservedCpu:{}", host, cpuCap.getReservedCapacity(), reservedCpu); cpuCap.setReservedCapacity(reservedCpu); } if (cpuCap.getUsedCapacity() != usedCpu) { - logger.debug("Calibrate used cpu for host: " + host.getId() + " old usedCpu:" + cpuCap.getUsedCapacity() + " new usedCpu:" + usedCpu); + logger.debug("Calibrate used cpu for host: {} old usedCpu:{} new usedCpu:{}", host, cpuCap.getUsedCapacity(), usedCpu); cpuCap.setUsedCapacity(usedCpu); } } if (memCap.getTotalCapacity() != host.getTotalMemory()) { - logger.debug("Calibrate total memory for host: " + host.getId() + " old total memory:" + toHumanReadableSize(memCap.getTotalCapacity()) + " new total memory:" + - toHumanReadableSize(host.getTotalMemory())); + logger.debug("Calibrate total memory for host: {} old total memory:{} new total memory:{}", host, 
toHumanReadableSize(memCap.getTotalCapacity()), toHumanReadableSize(host.getTotalMemory())); memCap.setTotalCapacity(host.getTotalMemory()); } // Set the capacity state as per the host allocation state. if(capacityState != memCap.getCapacityState()){ - logger.debug("Calibrate memory capacity state for host: " + host.getId() + " old capacity state:" + memCap.getTotalCapacity() + " new capacity state:" + hostTotalCpu); + logger.debug("Calibrate memory capacity state for host: {} old capacity state:{} new capacity state:{}", host, memCap.getTotalCapacity(), hostTotalCpu); memCap.setCapacityState(capacityState); } if (memCap.getUsedCapacity() == usedMemory && memCap.getReservedCapacity() == reservedMemory) { - logger.debug("No need to calibrate memory capacity, host:" + host.getId() + " usedMem: " + toHumanReadableSize(memCap.getUsedCapacity()) + " reservedMem: " + - toHumanReadableSize(memCap.getReservedCapacity())); + logger.debug("No need to calibrate memory capacity, host:{} usedMem: {} reservedMem: {}", host, toHumanReadableSize(memCap.getUsedCapacity()), toHumanReadableSize(memCap.getReservedCapacity())); } else { if (memCap.getReservedCapacity() != reservedMemory) { - logger.debug("Calibrate reserved memory for host: " + host.getId() + " old reservedMem:" + memCap.getReservedCapacity() + " new reservedMem:" + - reservedMemory); + logger.debug("Calibrate reserved memory for host: {} old reservedMem:{} new reservedMem:{}", host, memCap.getReservedCapacity(), reservedMemory); memCap.setReservedCapacity(reservedMemory); } if (memCap.getUsedCapacity() != usedMemory) { @@ -872,7 +861,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, * state(starting/migrating) that I don't know on which host * they are allocated */ - logger.debug("Calibrate used memory for host: " + host.getId() + " old usedMem: " + toHumanReadableSize(memCap.getUsedCapacity()) + " new usedMem: " + toHumanReadableSize(usedMemory)); + logger.debug("Calibrate used 
memory for host: {} old usedMem: {} new usedMem: {}", host, toHumanReadableSize(memCap.getUsedCapacity()), toHumanReadableSize(usedMemory)); memCap.setUsedCapacity(usedMemory); } } @@ -881,7 +870,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, _capacityDao.update(cpuCap.getId(), cpuCap); _capacityDao.update(memCap.getId(), memCap); } catch (Exception e) { - logger.error("Caught exception while updating cpu/memory capacity for the host " + host.getId(), e); + logger.error("Caught exception while updating cpu/memory capacity for the host {}", host, e); } } else { final long usedMemoryFinal = usedMemory; @@ -936,38 +925,38 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, if (oldState == State.Starting) { if (newState != State.Running) { - releaseVmCapacity(vm, false, false, oldHostId); + releaseVmCapacity(vm, false, false, oldHost); } } else if (oldState == State.Running) { if (event == Event.AgentReportStopped) { - releaseVmCapacity(vm, false, true, oldHostId); + releaseVmCapacity(vm, false, true, oldHost); } else if (event == Event.AgentReportMigrated) { - releaseVmCapacity(vm, false, false, oldHostId); + releaseVmCapacity(vm, false, false, oldHost); } } else if (oldState == State.Migrating) { if (event == Event.AgentReportStopped) { /* Release capacity from original host */ - releaseVmCapacity(vm, false, false, vm.getLastHostId()); - releaseVmCapacity(vm, false, false, oldHostId); + releaseVmCapacity(vm, false, false, lastHost); + releaseVmCapacity(vm, false, false, oldHost); } else if (event == Event.OperationFailed) { /* Release from dest host */ - releaseVmCapacity(vm, false, false, oldHostId); + releaseVmCapacity(vm, false, false, oldHost); } else if (event == Event.OperationSucceeded) { - releaseVmCapacity(vm, false, false, vm.getLastHostId()); + releaseVmCapacity(vm, false, false, lastHost); } } else if (oldState == State.Stopping) { if (event == Event.OperationSucceeded) { - 
releaseVmCapacity(vm, false, true, oldHostId); + releaseVmCapacity(vm, false, true, oldHost); } else if (event == Event.AgentReportStopped) { - releaseVmCapacity(vm, false, false, oldHostId); + releaseVmCapacity(vm, false, false, oldHost); } else if (event == Event.AgentReportMigrated) { - releaseVmCapacity(vm, false, false, oldHostId); + releaseVmCapacity(vm, false, false, oldHost); } } else if (oldState == State.Stopped) { if (event == Event.DestroyRequested || event == Event.ExpungeOperation) { - releaseVmCapacity(vm, true, false, vm.getLastHostId()); + releaseVmCapacity(vm, true, false, lastHost); } else if (event == Event.AgentReportMigrated) { - releaseVmCapacity(vm, false, false, oldHostId); + releaseVmCapacity(vm, false, false, oldHost); } } @@ -1079,7 +1068,6 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, @Override public boolean checkIfClusterCrossesThreshold(Long clusterId, Integer cpuRequested, long ramRequested) { - Float clusterCpuOverProvisioning = getClusterOverProvisioningFactor(clusterId, Capacity.CAPACITY_TYPE_CPU); Float clusterMemoryOverProvisioning = getClusterOverProvisioningFactor(clusterId, Capacity.CAPACITY_TYPE_MEMORY); Float clusterCpuCapacityDisableThreshold = DeploymentClusterPlanner.ClusterCPUCapacityDisableThreshold.valueIn(clusterId); @@ -1087,15 +1075,13 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, float cpuConsumption = _capacityDao.findClusterConsumption(clusterId, Capacity.CAPACITY_TYPE_CPU, cpuRequested); if (cpuConsumption / clusterCpuOverProvisioning > clusterCpuCapacityDisableThreshold) { - logger.debug("Cluster: " + clusterId + " cpu consumption " + cpuConsumption / clusterCpuOverProvisioning - + " crosses disable threshold " + clusterCpuCapacityDisableThreshold); + logger.debug("Cluster: {} cpu consumption {} crosses disable threshold {}", _clusterDao.findById(clusterId), cpuConsumption / clusterCpuOverProvisioning, 
clusterCpuCapacityDisableThreshold); return true; } float memoryConsumption = _capacityDao.findClusterConsumption(clusterId, Capacity.CAPACITY_TYPE_MEMORY, ramRequested); if (memoryConsumption / clusterMemoryOverProvisioning > clusterMemoryCapacityDisableThreshold) { - logger.debug("Cluster: " + clusterId + " memory consumption " + memoryConsumption / clusterMemoryOverProvisioning - + " crosses disable threshold " + clusterMemoryCapacityDisableThreshold); + logger.debug("Cluster: {} memory consumption {} crosses disable threshold {}", _clusterDao.findById(clusterId), memoryConsumption / clusterMemoryOverProvisioning, clusterMemoryCapacityDisableThreshold); return true; } @@ -1114,7 +1100,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, Float memoryOvercommitRatio = Float.parseFloat(clusterDetailsRamOvercommmt.getValue()); boolean hostHasCpuCapability = checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed()); - boolean hostHasCapacity = checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, + boolean hostHasCapacity = checkIfHostHasCapacity(host, cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, considerReservedCapacity); return new Pair<>(hostHasCpuCapability, hostHasCapacity); diff --git a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java index 02abc507fdb..dee1aa81758 100644 --- a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java @@ -752,7 +752,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati resourceType = ApiCommandResourceType.StoragePool; if(name.equals(CapacityManager.StorageOverprovisioningFactor.key())) { if(!pool.getPoolType().supportsOverProvisioning() ) { - 
throw new InvalidParameterValueException("Unable to update storage pool with id " + resourceId + ". Overprovision not supported for " + pool.getPoolType()); + throw new InvalidParameterValueException(String.format("Unable to update storage pool %s. Overprovision not supported for %s", pool, pool.getPoolType())); } } @@ -1608,7 +1608,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (checkForDuplicates) { // Check if the pod already exists if (validPod(podName, zone.getId())) { - throw new InvalidParameterValueException("A pod with name: " + podName + " already exists in zone " + zone.getId() + ". Please specify a different pod name. "); + throw new InvalidParameterValueException(String.format("A pod with name: %s already exists in zone %s. Please specify a different pod name. ", podName, zone)); } } @@ -1647,7 +1647,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati final List privateIps = _privateIpAddressDao.listByPodIdDcId(podId, pod.getDataCenterId()); if (!privateIps.isEmpty()) { if (!_privateIpAddressDao.deleteIpAddressByPod(podId)) { - throw new CloudRuntimeException("Failed to cleanup private ip addresses for pod " + podId); + throw new CloudRuntimeException(String.format("Failed to cleanup private ip addresses for pod %s", pod)); } } @@ -1655,7 +1655,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati final List localIps = _linkLocalIpAllocDao.listByPodIdDcId(podId, pod.getDataCenterId()); if (!localIps.isEmpty()) { if (!_linkLocalIpAllocDao.deleteIpAddressByPod(podId)) { - throw new CloudRuntimeException("Failed to cleanup private ip addresses for pod " + podId); + throw new CloudRuntimeException(String.format("Failed to cleanup private ip addresses for pod %s", pod)); } } @@ -1672,7 +1672,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati // Delete the pod if (!_podDao.remove(podId)) { - throw new 
CloudRuntimeException("Failed to delete pod " + podId); + throw new CloudRuntimeException(String.format("Failed to delete pod %s", pod)); } // remove from dedicated resources @@ -1717,7 +1717,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati final Account account = CallContext.current().getCallingAccount(); if(!_accountMgr.isRootAdmin(account.getId())) { - throw new PermissionDeniedException("Cannot perform this operation, Calling account is not root admin: " + account.getId()); + throw new PermissionDeniedException(String.format("Cannot perform this operation, Calling account is not root admin: %s", account)); } final long podId = cmd.getPodId(); @@ -1765,11 +1765,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati // Because each pod has only one Gateway and Netmask. if (!gateway.equals(pod.getGateway())) { - throw new InvalidParameterValueException("Multiple gateways for the POD: " + pod.getId() + " are not allowed. The Gateway should be same as the existing Gateway " + pod.getGateway()); + throw new InvalidParameterValueException(String.format("Multiple gateways for the POD: %s are not allowed. The Gateway should be same as the existing Gateway %s", pod, pod.getGateway())); } if (!netmask.equals(NetUtils.getCidrNetmask(cidrSize))) { - throw new InvalidParameterValueException("Multiple subnets for the POD: " + pod.getId() + " are not allowed. The Netmask should be same as the existing Netmask " + NetUtils.getCidrNetmask(cidrSize)); + throw new InvalidParameterValueException(String.format("Multiple subnets for the POD: %s are not allowed. The Netmask should be same as the existing Netmask %s", pod, NetUtils.getCidrNetmask(cidrSize))); } // Check if the IP range is valid. 
@@ -1828,7 +1828,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati lock = _podDao.acquireInLockTable(podId); if (lock == null) { - String msg = "Unable to acquire lock on table to update the ip range of POD: " + pod.getName() + ", Creation failed."; + String msg = String.format("Unable to acquire lock on table to update the ip range of POD: %s, Creation failed.", pod); logger.warn(msg); throw new CloudRuntimeException(msg); } @@ -1916,7 +1916,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } if(!foundRange) { - throw new InvalidParameterValueException("The input IP range: " + startIp + "-" + endIp + " of pod: " + podId + "is not present. Please input an existing range."); + throw new InvalidParameterValueException(String.format("The input IP range: %s-%s of pod: %s is not present. Please input an existing range.", startIp, endIp, pod)); } final StringBuilder newPodIpRange = new StringBuilder(); @@ -1941,7 +1941,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati lock = _podDao.acquireInLockTable(podId); if (lock == null) { - String msg = "Unable to acquire lock on table to update the ip range of POD: " + pod.getName() + ", Deletion failed."; + String msg = String.format("Unable to acquire lock on table to update the ip range of POD: %s, Deletion failed.", pod); logger.warn(msg); throw new CloudRuntimeException(msg); } @@ -1955,14 +1955,14 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati for(long ipAddr = NetUtils.ip2Long(startIp); ipAddr <= NetUtils.ip2Long(endIp); ipAddr++) { if (!_privateIpAddressDao.deleteIpAddressByPodDc(NetUtils.long2Ip(ipAddr), podId, pod.getDataCenterId())) { - throw new CloudRuntimeException("Failed to cleanup private ip address: " + NetUtils.long2Ip(ipAddr) + " of Pod: " + podId + " DC: " + pod.getDataCenterId()); + throw new CloudRuntimeException(String.format("Failed to cleanup private ip address: 
%s of Pod: %s DC: %s", NetUtils.long2Ip(ipAddr), pod, _zoneDao.findById(pod.getDataCenterId()))); } } } }); } catch (final Exception e) { - logger.error("Unable to delete Pod " + podId + "IP range due to " + e.getMessage(), e); - throw new CloudRuntimeException("Failed to delete Pod " + podId + "IP range. Please contact Cloud Support."); + logger.error("Unable to delete Pod {} IP range due to {}", pod, e.getMessage(), e); + throw new CloudRuntimeException(String.format("Failed to delete Pod %s IP range. Please contact Cloud Support.", pod)); } messageBus.publish(_name, MESSAGE_DELETE_POD_IP_RANGE_EVENT, PublishScope.LOCAL, pod); @@ -1996,7 +1996,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati final String[] existingPodIpRanges = pod.getDescription().split(","); if (existingPodIpRanges.length == 0) { - throw new InvalidParameterValueException("The IP range cannot be found in the pod: " + podId + " since the existing IP range is empty."); + throw new InvalidParameterValueException(String.format("The IP range cannot be found in the pod: %s since the existing IP range is empty.", pod)); } verifyIpRangeParameters(currentStartIP,currentEndIP); @@ -2023,8 +2023,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } }); } catch (final Exception e) { - logger.error("Unable to update Pod " + podId + " IP range due to " + e.getMessage(), e); - throw new CloudRuntimeException("Failed to update Pod " + podId + " IP range. Please contact Cloud Support."); + logger.error("Unable to update Pod {} IP range due to {}", pod, e.getMessage(), e); + throw new CloudRuntimeException(String.format("Failed to update Pod %s IP range. 
Please contact Cloud Support.", pod)); } } @@ -2062,7 +2062,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati try { lock = _podDao.acquireInLockTable(podId); if (lock == null) { - String msg = "Unable to acquire lock on table to update the ip range of POD: " + pod.getName() + ", Update failed."; + String msg = String.format("Unable to acquire lock on table to update the ip range of POD: %s, Update failed.", pod); logger.warn(msg); throw new CloudRuntimeException(msg); } @@ -2077,15 +2077,15 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (currentIpRange.size() > 0) { for (Long startIP: currentIpRange) { if (!_privateIpAddressDao.deleteIpAddressByPodDc(NetUtils.long2Ip(startIP),podId,zoneId)) { - throw new CloudRuntimeException("Failed to remove private ip address: " + NetUtils.long2Ip(startIP) + " of Pod: " + podId + " DC: " + pod.getDataCenterId()); + throw new CloudRuntimeException(String.format("Failed to remove private ip address: %s of Pod: %s DC: %s", NetUtils.long2Ip(startIP), pod, _zoneDao.findById(pod.getDataCenterId()))); } } } } _podDao.update(podId, pod); } catch (final Exception e) { - logger.error("Unable to update Pod " + podId + " IP range due to database error " + e.getMessage(), e); - throw new CloudRuntimeException("Failed to update Pod " + podId + " IP range. Please contact Cloud Support."); + logger.error("Unable to update Pod {} IP range due to database error {}", pod, e.getMessage(), e); + throw new CloudRuntimeException(String.format("Failed to update Pod %s IP range. 
Please contact Cloud Support.", pod)); } finally { if (lock != null) { _podDao.releaseFromLockTable(podId); @@ -2172,7 +2172,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati }); } catch (final Exception e) { logger.error(String.format("Unable to add IPv6 prefix for zone: %s due to %s", zone, e.getMessage()), e); - throw new CloudRuntimeException(String.format("Unable to add IPv6 prefix for zone ID: %s. Please contact Cloud Support.", zone.getUuid())); + throw new CloudRuntimeException(String.format("Unable to add IPv6 prefix for zone ID: %s. Please contact Cloud Support.", zone)); } return dataCenterGuestIpv6Prefix; } @@ -2360,7 +2360,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati final Account account = CallContext.current().getCallingAccount(); if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(account.getId())) { - throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId); + throw new PermissionDeniedException(String.format("Cannot perform this operation, Zone is currently disabled: %s", zone)); } String cidr = null; @@ -2724,11 +2724,6 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new InvalidParameterValueException("Please enter a valid guest cidr"); } - // Make sure the zone exists - if (!validZone(zoneId)) { - throw new InvalidParameterValueException("A zone with ID: " + zoneId + " does not exist."); - } - final String oldZoneName = zone.getName(); if (zoneName == null) { @@ -2834,8 +2829,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati _networkSvc.addTrafficTypeToPhysicalNetwork(mgmtPhyNetwork.getId(), TrafficType.Storage.toString(), "vlan", mgmtTraffic.getXenNetworkLabel(), mgmtTraffic.getKvmNetworkLabel(), mgmtTraffic.getVmwareNetworkLabel(), mgmtTraffic.getSimulatorNetworkLabel(), mgmtTraffic.getVlan(), 
mgmtTraffic.getHypervNetworkLabel(), mgmtTraffic.getOvm3NetworkLabel()); - logger.info("No storage traffic type was specified by admin, create default storage traffic on physical network " + mgmtPhyNetwork.getId() - + " with same configure of management traffic type"); + logger.info("No storage traffic type was specified by admin, create default storage traffic on physical network {} with same configure of management traffic type", mgmtPhyNetwork); } } catch (final InvalidParameterValueException ex) { throw new InvalidParameterValueException("Cannot enable this Zone since: " + ex.getMessage()); @@ -2859,7 +2853,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (resource != null) { resourceId = resource.getId(); if (!_dedicatedDao.remove(resourceId)) { - throw new CloudRuntimeException("Failed to delete dedicated Zone Resource " + resourceId); + throw new CloudRuntimeException(String.format("Failed to delete dedicated Zone Resource %s", resource)); } // find the group associated and check if there are any more // resources under that group @@ -3281,18 +3275,18 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati final Account account = _accountDao.findById(user.getAccountId()); if (account.getType() == Account.Type.DOMAIN_ADMIN) { if (filteredDomainIds.isEmpty()) { - throw new InvalidParameterValueException(String.format("Unable to create public service offering by admin: %s because it is domain-admin", user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to create public service offering by admin: %s because it is domain-admin", user)); } if (!org.apache.commons.lang3.StringUtils.isAllBlank(tags, hostTag) && !ALLOW_DOMAIN_ADMINS_TO_CREATE_TAGGED_OFFERINGS.valueIn(account.getAccountId())) { - throw new InvalidParameterValueException(String.format("User [%s] is unable to create service offerings with storage tags or host tags.", user.getUuid())); + throw new 
InvalidParameterValueException(String.format("User [%s] is unable to create service offerings with storage tags or host tags.", user)); } for (Long domainId : filteredDomainIds) { if (!_domainDao.isChildDomain(account.getDomainId(), domainId)) { - throw new InvalidParameterValueException(String.format("Unable to create service offering by another domain-admin: %s for domain: %s", user.getUuid(), _entityMgr.findById(Domain.class, domainId).getUuid())); + throw new InvalidParameterValueException(String.format("Unable to create service offering by another domain-admin: %s for domain: %s", user, _entityMgr.findById(Domain.class, domainId).getUuid())); } } } else if (account.getType() != Account.Type.ADMIN) { - throw new InvalidParameterValueException(String.format("Unable to create service offering by user: %s because it is not root-admin or domain-admin", user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to create service offering by user: %s because it is not root-admin or domain-admin", user)); } final ProvisioningType typedProvisioningType = ProvisioningType.getProvisioningType(provisioningType); @@ -3606,23 +3600,23 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (account.getType() == Account.Type.DOMAIN_ADMIN) { if (!filteredZoneIds.equals(existingZoneIds)) { // Domain-admins cannot update zone(s) for offerings - throw new InvalidParameterValueException(String.format("Unable to update zone(s) for service offering: %s by admin: %s as it is domain-admin", offeringHandle.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to update zone(s) for service offering: %s by admin: %s as it is domain-admin", offeringHandle, user)); } if (existingDomainIds.isEmpty()) { - throw new InvalidParameterValueException(String.format("Unable to update public service offering: %s by user: %s because it is domain-admin", offeringHandle.getUuid(), user.getUuid())); + throw 
new InvalidParameterValueException(String.format("Unable to update public service offering: %s by user: %s because it is domain-admin", offeringHandle, user)); } else { if (filteredDomainIds.isEmpty()) { - throw new InvalidParameterValueException(String.format("Unable to update service offering: %s to a public offering by user: %s because it is domain-admin", offeringHandle.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to update service offering: %s to a public offering by user: %s because it is domain-admin", offeringHandle, user)); } } if (!org.apache.commons.lang3.StringUtils.isAllBlank(hostTags, storageTags) && !ALLOW_DOMAIN_ADMINS_TO_CREATE_TAGGED_OFFERINGS.valueIn(account.getAccountId())) { - throw new InvalidParameterValueException(String.format("User [%s] is unable to update storage tags or host tags.", user.getUuid())); + throw new InvalidParameterValueException(String.format("User [%s] is unable to update storage tags or host tags.", user)); } List nonChildDomains = new ArrayList<>(); for (Long domainId : existingDomainIds) { if (!_domainDao.isChildDomain(account.getDomainId(), domainId)) { if (name != null || displayText != null || sortKey != null) { // Domain-admins cannot update name, display text, sort key for offerings with domain which are not child domains for domain-admin - throw new InvalidParameterValueException(String.format("Unable to update service offering: %s as it has linked domain(s) which are not child domain for domain-admin: %s", offeringHandle.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to update service offering: %s as it has linked domain(s) which are not child domain for domain-admin: %s", offeringHandle, user)); } nonChildDomains.add(domainId); } @@ -3630,12 +3624,12 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati for (Long domainId : filteredDomainIds) { if 
(!_domainDao.isChildDomain(account.getDomainId(), domainId)) { Domain domain = _entityMgr.findById(Domain.class, domainId); - throw new InvalidParameterValueException(String.format("Unable to update service offering: %s by domain-admin: %s with domain: %3$s which is not a child domain", offeringHandle.getUuid(), user.getUuid(), domain.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to update service offering: %s by domain-admin: %s with domain: %s which is not a child domain", offeringHandle, user, domain)); } } filteredDomainIds.addAll(nonChildDomains); // Final list must include domains which were not child domain for domain-admin but specified for this offering prior to update } else if (account.getType() != Account.Type.ADMIN) { - throw new InvalidParameterValueException(String.format("Unable to update service offering: %s by id user: %s because it is not root-admin or domain-admin", offeringHandle.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to update service offering: %s by id user: %s because it is not root-admin or domain-admin", offeringHandle, user)); } final boolean updateNeeded = name != null || displayText != null || sortKey != null || storageTags != null || hostTags != null || state != null; @@ -3795,18 +3789,18 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati final Account account = _accountDao.findById(user.getAccountId()); if (account.getType() == Account.Type.DOMAIN_ADMIN) { if (filteredDomainIds.isEmpty()) { - throw new InvalidParameterValueException(String.format("Unable to create public disk offering by admin: %s because it is domain-admin", user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to create public disk offering by admin: %s because it is domain-admin", user)); } if (StringUtils.isNotBlank(tags) && !ALLOW_DOMAIN_ADMINS_TO_CREATE_TAGGED_OFFERINGS.valueIn(account.getAccountId())) { - throw new 
InvalidParameterValueException(String.format("User [%s] is unable to create disk offerings with storage tags.", user.getUuid())); + throw new InvalidParameterValueException(String.format("User [%s] is unable to create disk offerings with storage tags.", user)); } for (Long domainId : filteredDomainIds) { if (domainId == null || !_domainDao.isChildDomain(account.getDomainId(), domainId)) { - throw new InvalidParameterValueException(String.format("Unable to create disk offering by another domain-admin: %s for domain: %s", user.getUuid(), _entityMgr.findById(Domain.class, domainId).getUuid())); + throw new InvalidParameterValueException(String.format("Unable to create disk offering by another domain-admin: %s for domain: %s", user, _entityMgr.findById(Domain.class, domainId).getUuid())); } } } else if (account.getType() != Account.Type.ADMIN) { - throw new InvalidParameterValueException(String.format("Unable to create disk offering by user: %s because it is not root-admin or domain-admin", user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to create disk offering by user: %s because it is not root-admin or domain-admin", user)); } tags = com.cloud.utils.StringUtils.cleanupTags(tags); @@ -4074,7 +4068,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati checkDomainAdminUpdateOfferingRestrictions(diskOfferingHandle, user, filteredZoneIds, existingZoneIds, existingDomainIds, filteredDomainIds); if (StringUtils.isNotBlank(tags) && !ALLOW_DOMAIN_ADMINS_TO_CREATE_TAGGED_OFFERINGS.valueIn(account.getAccountId())) { - throw new InvalidParameterValueException(String.format("User [%s] is unable to update disk offering tags.", user.getUuid())); + throw new InvalidParameterValueException(String.format("User [%s] is unable to update disk offering tags.", user)); } List nonChildDomains = getAccountNonChildDomains(diskOfferingHandle, account, user, cmd, existingDomainIds); @@ -4083,7 +4077,7 @@ public class 
ConfigurationManagerImpl extends ManagerBase implements Configurati filteredDomainIds.addAll(nonChildDomains); // Final list must include domains which were not child domain for domain-admin but specified for this offering prior to update } else if (account.getType() != Account.Type.ADMIN) { - throw new InvalidParameterValueException(String.format("Unable to update disk offering: %s by id user: %s because it is not root-admin or domain-admin", diskOfferingHandle.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to update disk offering: %s by id user: %s because it is not root-admin or domain-admin", diskOfferingHandle, user)); } boolean updateNeeded = shouldUpdateDiskOffering(name, displayText, sortKey, displayDiskOffering, tags, cacheMode, state) || @@ -4216,13 +4210,13 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati protected void checkDomainAdminUpdateOfferingRestrictions(DiskOffering diskOffering, User user, List filteredZoneIds, List existingZoneIds, List existingDomainIds, List filteredDomainIds) { if (!filteredZoneIds.equals(existingZoneIds)) { - throw new InvalidParameterValueException(String.format("Unable to update zone(s) for disk offering [%s] by admin [%s] as it is domain-admin.", diskOffering.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to update zone(s) for disk offering [%s] by admin [%s] as it is domain-admin.", diskOffering, user)); } if (existingDomainIds.isEmpty()) { - throw new InvalidParameterValueException(String.format("Unable to update public disk offering [%s] by user [%s] because it is domain-admin.", diskOffering.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to update public disk offering [%s] by user [%s] because it is domain-admin.", diskOffering, user)); } if (filteredDomainIds.isEmpty()) { - throw new InvalidParameterValueException(String.format("Unable to 
update disk offering [%s] to a public offering by user [%s] because it is domain-admin.", diskOffering.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to update disk offering [%s] to a public offering by user [%s] because it is domain-admin.", diskOffering, user)); } } @@ -4325,7 +4319,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } throw new InvalidParameterValueException(String.format("There are active VMs using offering [%s], and the hosts [%s] don't have the new tags", - offering.getId(), hosts)); + offering, hosts)); } } offering.setHostTag(hostTags); @@ -4374,15 +4368,15 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (account.getType() == Account.Type.DOMAIN_ADMIN) { List existingDomainIds = diskOfferingDetailsDao.findDomainIds(diskOfferingId); if (existingDomainIds.isEmpty()) { - throw new InvalidParameterValueException(String.format("Unable to delete public disk offering: %s by admin: %s because it is domain-admin", offering.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to delete public disk offering: %s by admin: %s because it is domain-admin", offering, user)); } for (Long domainId : existingDomainIds) { if (!_domainDao.isChildDomain(account.getDomainId(), domainId)) { - throw new InvalidParameterValueException(String.format("Unable to delete disk offering: %s as it has linked domain(s) which are not child domain for domain-admin: %s", offering.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to delete disk offering: %s as it has linked domain(s) which are not child domain for domain-admin: %s", offering, user)); } } } else if (account.getType() != Account.Type.ADMIN) { - throw new InvalidParameterValueException(String.format("Unable to delete disk offering: %s by user: %s because it is not root-admin or domain-admin", offering.getUuid(), 
user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to delete disk offering: %s by user: %s because it is not root-admin or domain-admin", offering, user)); } annotationDao.removeByEntityType(AnnotationService.EntityType.DISK_OFFERING.name(), offering.getUuid()); @@ -4433,7 +4427,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati // Verify disk offering id mapped to the service offering final DiskOfferingVO diskOffering = _diskOfferingDao.findById(offering.getDiskOfferingId()); if (diskOffering == null) { - throw new InvalidParameterValueException("unable to find disk offering " + offering.getDiskOfferingId() + " mapped to the service offering " + offeringId); + throw new InvalidParameterValueException("unable to find disk offering " + offering.getDiskOfferingId() + " mapped to the service offering " + offering); } if (offering.getDefaultUse()) { @@ -4448,22 +4442,22 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (account.getType() == Account.Type.DOMAIN_ADMIN) { List existingDomainIds = _serviceOfferingDetailsDao.findDomainIds(offeringId); if (existingDomainIds.isEmpty()) { - throw new InvalidParameterValueException(String.format("Unable to delete public service offering: %s by admin: %s because it is domain-admin", offering.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to delete public service offering: %s by admin: %s because it is domain-admin", offering, user)); } for (Long domainId : existingDomainIds) { if (!_domainDao.isChildDomain(account.getDomainId(), domainId)) { - throw new InvalidParameterValueException(String.format("Unable to delete service offering: %s as it has linked domain(s) which are not child domain for domain-admin: %s", offering.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to delete service offering: %s as it has linked domain(s) which are 
not child domain for domain-admin: %s", offering, user)); } } } else if (account.getType() != Account.Type.ADMIN) { - throw new InvalidParameterValueException(String.format("Unable to delete service offering: %s by user: %s because it is not root-admin or domain-admin", offering.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to delete service offering: %s by user: %s because it is not root-admin or domain-admin", offering, user)); } annotationDao.removeByEntityType(AnnotationService.EntityType.SERVICE_OFFERING.name(), offering.getUuid()); if (diskOffering.isComputeOnly()) { diskOffering.setState(DiskOffering.State.Inactive); if (!_diskOfferingDao.update(diskOffering.getId(), diskOffering)) { - throw new CloudRuntimeException(String.format("Unable to delete disk offering %s mapped to the service offering %s", diskOffering.getUuid(), offering.getUuid())); + throw new CloudRuntimeException(String.format("Unable to delete disk offering %s mapped to the service offering %s", diskOffering, offering)); } } offering.setState(ServiceOffering.State.Inactive); @@ -4625,7 +4619,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati final Account caller = CallContext.current().getCallingAccount(); if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getId())) { - throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId); + throw new PermissionDeniedException(String.format("Cannot perform this operation, Zone is currently disabled: %s", zone)); } if (zone.isSecurityGroupEnabled() && zone.getNetworkType() != DataCenter.NetworkType.Basic && forVirtualNetwork) { @@ -4640,7 +4634,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati networkId = _networkModel.getSystemNetworkByZoneAndTrafficType(zoneId, TrafficType.Public).getId(); network = _networkModel.getNetwork(networkId); 
} else if (network.getGuestType() != null || network.getTrafficType() != TrafficType.Public) { - throw new InvalidParameterValueException("Can't find Public network by id=" + networkId); + throw new InvalidParameterValueException(String.format("Can't find Public network %s", network)); } } else { if (network == null) { @@ -4658,7 +4652,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } else if (network.getGuestType() == null || network.getGuestType() == Network.GuestType.Isolated && _ntwkOffServiceMapDao.areServicesSupportedByNetworkOffering(network.getNetworkOfferingId(), Service.SourceNat)) { - throw new InvalidParameterValueException("Can't create direct vlan for network id=" + networkId + " with type: " + network.getGuestType()); + throw new InvalidParameterValueException(String.format("Can't create direct vlan for network %s with type: %s", network, network.getGuestType())); } } @@ -4671,8 +4665,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (zone.getNetworkType() == DataCenter.NetworkType.Advanced) { if (network.getTrafficType() == TrafficType.Guest) { if (network.getGuestType() != GuestType.Shared) { - throw new InvalidParameterValueException("Can execute createVLANIpRanges on shared guest network, but type of this guest network " + network.getId() + " is " - + network.getGuestType()); + throw new InvalidParameterValueException(String.format("Can execute createVLANIpRanges on shared guest network, but type of this guest network %s is %s", network, network.getGuestType())); } final List vlans = _vlanDao.listVlansByNetworkId(network.getId()); @@ -4681,8 +4674,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (vlanId == null || vlanId.contains(Vlan.UNTAGGED)) { vlanId = vlan.getVlanTag(); } else if (!NetUtils.isSameIsolationId(vlan.getVlanTag(), vlanId)) { - throw new InvalidParameterValueException("there is already one vlan " + 
vlan.getVlanTag() + " on network :" + +network.getId() - + ", only one vlan is allowed on guest network"); + throw new InvalidParameterValueException(String.format("there is already one vlan %s on network :%s, only one vlan is allowed on guest network", vlan.getVlanTag(), network)); } } sameSubnet = validateIpRange(startIP, endIP, newVlanGateway, newVlanNetmask, vlans, ipv4, ipv6, ip6Gateway, ip6Cidr, startIPv6, endIPv6, network); @@ -4739,7 +4731,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (supportsMultipleSubnets == null || !Boolean.valueOf(supportsMultipleSubnets)) { throw new InvalidParameterValueException("The dhcp service provider for this network does not support dhcp across multiple subnets"); } - logger.info("adding a new subnet to the network " + network.getId()); + logger.info("adding a new subnet to the network {}", network); } else if (sameSubnet != null) { // if it is same subnet the user might not send the vlan and the // netmask details. 
so we are @@ -4913,7 +4905,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new InvalidParameterValueException("Please specify a valid pod."); } if (pod.getDataCenterId() != zoneId) { - throw new InvalidParameterValueException("Pod id=" + podId + " doesn't belong to zone id=" + zoneId); + throw new InvalidParameterValueException(String.format("Pod %s doesn't belong to zone id=%d", pod, zoneId)); } // pod vlans can be created in basic zone only if (zone.getNetworkType() != NetworkType.Basic || network.getTrafficType() != TrafficType.Guest) { @@ -5306,7 +5298,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } if (logger.isDebugEnabled()) { - logger.debug("lock vlan " + id + " is acquired"); + logger.debug("lock on vlan {} is acquired", range); } commitUpdateVlanAndIpRange(id, newStartIP, newEndIP, currentStartIP, currentEndIP, gateway, netmask,true, isRangeForSystemVM, forSystemVms); @@ -5364,7 +5356,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } if (logger.isDebugEnabled()) { - logger.debug("lock vlan " + id + " is acquired"); + logger.debug("lock on vlan {} is acquired", range); } commitUpdateVlanAndIpRange(id, startIpv6, endIpv6, currentStartIPv6, currentEndIPv6, ip6Gateway, ip6Cidr, false, isRangeForSystemVM,forSystemVms); @@ -5385,7 +5377,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @Override public VlanVO doInTransaction(final TransactionStatus status) { VlanVO vlanRange = _vlanDao.findById(id); - logger.debug("Updating vlan range " + vlanRange.getId()); + logger.debug("Updating vlan range {}", vlanRange); if (ipv4) { vlanRange.setIpRange(newStartIP + "-" + newEndIP); vlanRange.setVlanGateway(gateway); @@ -5496,32 +5488,28 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } if (logger.isDebugEnabled()) { - logger.debug("lock vlan " + vlanDbId + " is acquired"); + 
logger.debug("lock on vlan {} is acquired", vlanRange); } for (final IPAddressVO ip : ips) { boolean success = true; if (ip.isOneToOneNat()) { - throw new InvalidParameterValueException("Can't delete account specific vlan " + vlanDbId + " as ip " + ip - + " belonging to the range is used for static nat purposes. Cleanup the rules first"); + throw new InvalidParameterValueException(String.format("Can't delete account specific vlan %s as ip %s belonging to the range is used for static nat purposes. Cleanup the rules first", vlanRange, ip)); } if (ip.isSourceNat()) { - throw new InvalidParameterValueException("Can't delete account specific vlan " + vlanDbId + " as ip " + ip - + " belonging to the range is a source nat ip for the network id=" + ip.getSourceNetworkId() - + ". IP range with the source nat ip address can be removed either as a part of Network, or account removal"); + throw new InvalidParameterValueException(String.format("Can't delete account specific vlan %s as ip %s belonging to the range is a source nat ip for the network id=%d. IP range with the source nat ip address can be removed either as a part of Network, or account removal", vlanRange, ip, ip.getSourceNetworkId())); } if (_firewallDao.countRulesByIpId(ip.getId()) > 0) { - throw new InvalidParameterValueException("Can't delete account specific vlan " + vlanDbId + " as ip " + ip - + " belonging to the range has firewall rules applied. Cleanup the rules first"); + throw new InvalidParameterValueException(String.format("Can't delete account specific vlan %s as ip %s belonging to the range has firewall rules applied. 
Cleanup the rules first", vlanRange, ip)); } if (ip.getAllocatedTime() != null) { // This means IP is allocated // release public ip address here - success = _ipAddrMgr.disassociatePublicIpAddress(ip.getId(), userId, caller); + success = _ipAddrMgr.disassociatePublicIpAddress(ip, userId, caller); } if (!success) { - logger.warn("Some ip addresses failed to be released as a part of vlan " + vlanDbId + " removal"); + logger.warn("Some ip addresses failed to be released as a part of vlan {} removal", vlanRange); } else { resourceCountToBeDecrement++; final boolean usageHidden = _ipAddrMgr.isUsageHidden(ip); @@ -5539,8 +5527,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati final NicIpAliasVO ipAlias = _nicIpAliasDao.findByGatewayAndNetworkIdAndState(vlanRange.getVlanGateway(), vlanRange.getNetworkId(), NicIpAlias.State.active); //check if the ipalias belongs to the vlan range being deleted. if (ipAlias != null && vlanDbId == _publicIpAddressDao.findByIpAndSourceNetworkId(vlanRange.getNetworkId(), ipAlias.getIp4Address()).getVlanId()) { - throw new InvalidParameterValueException("Cannot delete vlan range " + vlanDbId + " as " + ipAlias.getIp4Address() - + "is being used for providing dhcp service in this subnet. Delete all VMs in this subnet and try again"); + throw new InvalidParameterValueException(String.format("Cannot delete vlan range %s as %s is being used for providing dhcp service in this subnet. Delete all VMs in this subnet and try again", vlanRange, ipAlias.getIp4Address())); } final long allocIpCount = _publicIpAddressDao.countIPs(vlanRange.getDataCenterId(), vlanDbId, true); if (allocIpCount > 0) { @@ -5552,21 +5539,22 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new InvalidParameterValueException(String.format("%d IPv6 addresses are in use. 
Cannot delete this vlan", ipAddresses.size())); } + VlanVO finalVlanRange = vlanRange; Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) { _publicIpAddressDao.deletePublicIPRange(vlanDbId); - logger.debug(String.format("Delete Public IP Range (from user_ip_address, where vlan_db_id=%s)", vlanDbId)); + logger.debug("Delete Public IP Range (from user_ip_address, where vlan_db_id={})", vlanDbId); _vlanDao.remove(vlanDbId); - logger.debug(String.format("Mark vlan as Remove vlan (vlan_db_id=%s)", vlanDbId)); + logger.debug("Mark vlan as Remove vlan (vlan_db_id={})", vlanDbId); SearchBuilder sb = podVlanMapDao.createSearchBuilder(); sb.and("vlan_db_id", sb.entity().getVlanDbId(), SearchCriteria.Op.EQ); SearchCriteria sc = sb.create(); sc.setParameters("vlan_db_id", vlanDbId); podVlanMapDao.remove(sc); - logger.debug(String.format("Delete vlan_db_id=%s in pod_vlan_map", vlanDbId)); + logger.debug("Delete vlan_db_id={} in pod_vlan_map", vlanDbId); } }); @@ -5701,16 +5689,16 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new InvalidParameterValueException("Please specify a valid IP range id."); } - return releasePublicIpRange(vlanDbId, CallContext.current().getCallingUserId(), CallContext.current().getCallingAccount()); + return releasePublicIpRange(vlanDbId, CallContext.current().getCallingUser(), CallContext.current().getCallingAccount()); } @DB - public boolean releasePublicIpRange(final long vlanDbId, final long userId, final Account caller) { + public boolean releasePublicIpRange(final long vlanDbId, final User user, final Account caller) { VlanVO vlan = _vlanDao.findById(vlanDbId); if(vlan == null) { // Nothing to do if vlan can't be found - logger.warn(String.format("Skipping the process for releasing public IP range as could not find a VLAN with ID '%s' for Account '%s' and User '%s'." 
- ,vlanDbId, caller, userId)); + logger.warn("Skipping the process for releasing public IP range as could not find a VLAN with ID '{}' for Account '{}' and User '{}'.", + vlanDbId, caller, user); return true; } @@ -5745,21 +5733,21 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new CloudRuntimeException("Unable to acquire vlan configuration: " + vlanDbId); } if (logger.isDebugEnabled()) { - logger.debug("lock vlan " + vlanDbId + " is acquired"); + logger.debug("lock on vlan {} is acquired", vlan); } for (final IPAddressVO ip : ips) { // Disassociate allocated IP's that are not in use if (!ip.isOneToOneNat() && !ip.isSourceNat() && !(_firewallDao.countRulesByIpId(ip.getId()) > 0)) { if (logger.isDebugEnabled()) { - logger.debug("Releasing Public IP addresses" + ip + " of vlan " + vlanDbId + " as part of Public IP" + " range release to the system pool"); + logger.debug("Releasing Public IP addresses {} of vlan {} as part of Public IP range release to the system pool", ip, vlan); } - success = success && _ipAddrMgr.disassociatePublicIpAddress(ip.getId(), userId, caller); + success = success && _ipAddrMgr.disassociatePublicIpAddress(ip, user.getId(), caller); } else { ipsInUse.add(ip); } } if (!success) { - logger.warn("Some Public IP addresses that were not in use failed to be released as a part of" + " vlan " + vlanDbId + "release to the system pool"); + logger.warn("Some Public IP addresses that were not in use failed to be released as a part of vlan {} release to the system pool", vlan); } } finally { _vlanDao.releaseFromLockTable(vlanDbId); @@ -6093,16 +6081,16 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati for (final SecurityChecker checker : _secChecker) { if (checker.checkAccess(caller, dof, zone)) { if (logger.isDebugEnabled()) { - logger.debug("Access granted to " + caller + " to disk offering:" + dof.getId() + " by " + checker.getName()); + logger.debug("Access granted to {} to 
disk offering: {} by {}", caller, dof, checker.getName()); } return; } else { - throw new PermissionDeniedException(String.format("Access denied to %s for disk offering: %s, zone: %s by %s", caller, dof.getUuid(), zone.getUuid(), checker.getName())); + throw new PermissionDeniedException(String.format("Access denied to %s for disk offering: %s, zone: %s by %s", caller, dof, zone, checker.getName())); } } assert false : "How can all of the security checkers pass on checking this caller?"; - throw new PermissionDeniedException("There's no way to confirm " + caller + " has access to disk offering:" + dof.getId()); + throw new PermissionDeniedException(String.format("There's no way to confirm %s has access to disk offering:%s", caller, dof)); } @Override @@ -6110,16 +6098,16 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati for (final SecurityChecker checker : _secChecker) { if (checker.checkAccess(caller, zone)) { if (logger.isDebugEnabled()) { - logger.debug("Access granted to " + caller + " to zone:" + zone.getId() + " by " + checker.getName()); + logger.debug("Access granted to {} to zone:{} by {}", caller, zone, checker.getName()); } return; } else { - throw new PermissionDeniedException("Access denied to " + caller + " by " + checker.getName() + " for zone " + zone.getId()); + throw new PermissionDeniedException(String.format("Access denied to %s by %s for zone %s", caller, checker.getName(), zone)); } } assert false : "How can all of the security checkers pass on checking this caller?"; - throw new PermissionDeniedException("There's no way to confirm " + caller + " has access to zone:" + zone.getId()); + throw new PermissionDeniedException(String.format("There's no way to confirm %s has access to zone:%s", caller, zone)); } @Override @@ -7338,8 +7326,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati // though) final int networkCount = _networkDao.getNetworkCountByNetworkOffId(offeringId); if 
(networkCount > 0) { - throw new InvalidParameterValueException("Can't delete network offering " + offeringId + " as its used by " + networkCount + " networks. " - + "To make the network offering unavailable, disable it"); + throw new InvalidParameterValueException(String.format("Can't delete network offering %s as it's used by %d networks. To make the network offering unavailable, disable it", offering, networkCount)); } annotationDao.removeByEntityType(AnnotationService.EntityType.NETWORK_OFFERING.name(), offering.getUuid()); @@ -7455,7 +7442,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (oldTags != null) { long oldPhysicalNetworkId = _networkModel.findPhysicalNetworkId(zoneId, oldTags, trafficType); if (newPhysicalNetworkId != oldPhysicalNetworkId) { - throw new InvalidParameterValueException("New tags: selects different physical network for zone " + zoneId); + throw new InvalidParameterValueException(String.format("New tags: selects different physical network for zone %s", dataCenter)); } } } @@ -7563,8 +7550,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati // Check if the account exists final Account account = _accountDao.findEnabledAccount(accountName, domainId); if (account == null) { - logger.error("Unable to find account by name: " + accountName + " in domain " + domainId); - throw new InvalidParameterValueException("Account by name: " + accountName + " doesn't exist in domain " + domainId); + DomainVO domain = _domainDao.findById(domainId); + String domainStr = domain == null ? 
String.valueOf(domainId) : domain.toString(); + logger.error("Unable to find account by name: {} in domain {}", accountName, domainStr); + throw new InvalidParameterValueException(String.format("Account by name: %s doesn't exist in domain %s", accountName, domainStr)); } // Don't allow modification of system account @@ -7678,16 +7667,16 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @Override @DB - public boolean releaseDomainSpecificVirtualRanges(final long domainId) { - final List maps = _domainVlanMapDao.listDomainVlanMapsByDomain(domainId); + public boolean releaseDomainSpecificVirtualRanges(final Domain domain) { + final List maps = _domainVlanMapDao.listDomainVlanMapsByDomain(domain.getId()); if (CollectionUtils.isNotEmpty(maps)) { try { Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) { for (DomainVlanMapVO map : maps) { - if (!releasePublicIpRange(map.getVlanDbId(), _accountMgr.getSystemUser().getId(), _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM))) { - throw new CloudRuntimeException("Failed to release domain specific virtual ip ranges for domain id=" + domainId); + if (!releasePublicIpRange(map.getVlanDbId(), _accountMgr.getSystemUser(), _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM))) { + throw new CloudRuntimeException(String.format("Failed to release domain specific virtual ip ranges for domain %s", domain)); } } } @@ -7697,23 +7686,23 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati return false; } } else { - logger.trace("Domain id=" + domainId + " has no domain specific virtual ip ranges, nothing to release"); + logger.trace("Domain {} has no domain specific virtual ip ranges, nothing to release", domain); } return true; } @Override @DB - public boolean releaseAccountSpecificVirtualRanges(final long accountId) { - final List maps = 
_accountVlanMapDao.listAccountVlanMapsByAccount(accountId); + public boolean releaseAccountSpecificVirtualRanges(final Account account) { + final List maps = _accountVlanMapDao.listAccountVlanMapsByAccount(account.getId()); if (maps != null && !maps.isEmpty()) { try { Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) { for (final AccountVlanMapVO map : maps) { - if (!releasePublicIpRange(map.getVlanDbId(), _accountMgr.getSystemUser().getId(), _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM))) { - throw new CloudRuntimeException("Failed to release account specific virtual ip ranges for account id=" + accountId); + if (!releasePublicIpRange(map.getVlanDbId(), _accountMgr.getSystemUser(), _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM))) { + throw new CloudRuntimeException(String.format("Failed to release account specific virtual ip ranges for account %s", account)); } } } @@ -7723,7 +7712,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati return false; } } else { - logger.trace("Account id=" + accountId + " has no account specific virtual ip ranges, nothing to release"); + logger.trace("Account {} has no account specific virtual ip ranges, nothing to release", account); } return true; } @@ -7834,9 +7823,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (zones != null && !zones.isEmpty()) { for (final DataCenterVO zone : zones) { // check if there is zone vlan with same id - if (_vlanDao.findByZoneAndVlanId(zone.getId(), vlanId) != null) { - throw new InvalidParameterValueException("Found a VLAN id " + vlanId + " already existing in" + " zone " + zone.getUuid() - + " that conflicts with VLAN id of the portable ip range being configured"); + VlanVO vlanVO = _vlanDao.findByZoneAndVlanId(zone.getId(), vlanId); + if (vlanVO != null) { + throw new InvalidParameterValueException(String.format("Found a VLAN id 
%s already existing in zone %s that conflicts with VLAN id of the portable ip range being configured", vlanVO, zone)); } //check if there is a public ip range that overlaps with portable ip range being created checkOverlapPublicIpRange(zone.getId(), startIP, endIP); diff --git a/server/src/main/java/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java b/server/src/main/java/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java index 76e019df1b3..863307035ee 100644 --- a/server/src/main/java/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java +++ b/server/src/main/java/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java @@ -21,6 +21,7 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.dc.dao.HostPodDao; import org.apache.cloudstack.consoleproxy.ConsoleAccessManager; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.security.keys.KeysManager; @@ -38,7 +39,6 @@ import com.cloud.server.ManagementServer; import com.cloud.utils.NumbersUtil; import com.cloud.utils.component.ManagerBase; import com.cloud.vm.ConsoleProxyVO; -import com.cloud.vm.UserVmVO; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineManager; @@ -52,6 +52,8 @@ public class AgentBasedConsoleProxyManager extends ManagerBase implements Consol @Inject protected HostDao _hostDao; @Inject + protected HostPodDao podDao; + @Inject protected UserVmDao _userVmDao; protected String _consoleProxyUrlDomain; @Inject @@ -140,17 +142,11 @@ public class AgentBasedConsoleProxyManager extends ManagerBase implements Consol } @Override - public ConsoleProxyInfo assignProxy(long dataCenterId, long userVmId) { - UserVmVO userVm = _userVmDao.findById(userVmId); - if (userVm == null) { - logger.warn("User VM " + userVmId + " no longer exists, return a null proxy for user vm:" + userVmId); - return null; - } - + public ConsoleProxyInfo 
assignProxy(long dataCenterId, VMInstanceVO userVm) { HostVO host = findHost(userVm); if (host != null) { if (logger.isDebugEnabled()) { - logger.debug("Assign embedded console proxy running at " + host.getName() + " to user vm " + userVmId + " with public IP " + host.getPublicIpAddress()); + logger.debug("Assign embedded console proxy running at {} to user vm {} with public IP {}", host, userVm, host.getPublicIpAddress()); } // only private IP, public IP, host id have meaningful values, rest @@ -172,7 +168,7 @@ public class AgentBasedConsoleProxyManager extends ManagerBase implements Consol return new ConsoleProxyInfo(_sslEnabled, publicIp, _consoleProxyPort, urlPort, _consoleProxyUrlDomain); } else { - logger.warn("Host that VM is running is no longer available, console access to VM " + userVmId + " will be temporarily unavailable."); + logger.warn("Host that VM is running is no longer available, console access to VM {} will be temporarily unavailable.", userVm); } return null; } diff --git a/server/src/main/java/com/cloud/consoleproxy/AgentBasedStandaloneConsoleProxyManager.java b/server/src/main/java/com/cloud/consoleproxy/AgentBasedStandaloneConsoleProxyManager.java index 60e2265c41c..9e4710c89af 100644 --- a/server/src/main/java/com/cloud/consoleproxy/AgentBasedStandaloneConsoleProxyManager.java +++ b/server/src/main/java/com/cloud/consoleproxy/AgentBasedStandaloneConsoleProxyManager.java @@ -22,7 +22,7 @@ import java.util.List; import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.info.ConsoleProxyInfo; -import com.cloud.vm.UserVmVO; +import com.cloud.vm.VMInstanceVO; /** * This class is intended to replace the use of console proxy VMs managed by the Apache CloudStack (ACS) @@ -31,12 +31,7 @@ import com.cloud.vm.UserVmVO; public class AgentBasedStandaloneConsoleProxyManager extends AgentBasedConsoleProxyManager { @Override - public ConsoleProxyInfo assignProxy(long dataCenterId, long userVmId) { - UserVmVO userVm = 
_userVmDao.findById(userVmId); - if (userVm == null) { - logger.warn("User VM " + userVmId + " no longer exists, return a null proxy for user vm:" + userVmId); - return null; - } + public ConsoleProxyInfo assignProxy(long dataCenterId, VMInstanceVO userVm) { HostVO host = findHost(userVm); if (host != null) { @@ -60,21 +55,22 @@ public class AgentBasedStandaloneConsoleProxyManager extends AgentBasedConsolePr } if (allocatedHost == null) { if (logger.isDebugEnabled()) { - logger.debug("Failed to find a console proxy at host: " + host.getName() + " and in the pod: " + host.getPodId() + " to user vm " + userVmId); + logger.debug("Failed to find a console proxy at host: {} and in the pod: {} to user vm {}", + host::toString, () -> podDao.findById(host.getPodId()), userVm::toString); } return null; } if (logger.isDebugEnabled()) { - logger.debug("Assign standalone console proxy running at " + allocatedHost.getName() + " to user vm " + userVmId + " with public IP " - + allocatedHost.getPublicIpAddress()); + logger.debug("Assign standalone console proxy running at {} to user vm {} with public IP {}", allocatedHost, userVm, allocatedHost.getPublicIpAddress()); } // only private IP, public IP, host id have meaningful values, rest of all are place-holder values String publicIp = allocatedHost.getPublicIpAddress(); if (publicIp == null) { if (logger.isDebugEnabled()) { - logger.debug("Host " + allocatedHost.getName() + "/" + allocatedHost.getPrivateIpAddress() - + " does not have public interface, we will return its private IP for cosole proxy."); + logger.debug("Host {} (private IP address: {}) does not have public " + + "interface, we will return its private IP for console proxy.", + allocatedHost, allocatedHost.getPrivateIpAddress()); } publicIp = allocatedHost.getPrivateIpAddress(); } @@ -86,7 +82,7 @@ public class AgentBasedStandaloneConsoleProxyManager extends AgentBasedConsolePr return new ConsoleProxyInfo(_sslEnabled, publicIp, _consoleProxyPort, urlPort, 
_consoleProxyUrlDomain); } else { - logger.warn("Host that VM is running is no longer available, console access to VM " + userVmId + " will be temporarily unavailable."); + logger.warn("Host that VM is running is no longer available, console access to VM {} will be temporarily unavailable.", userVm); } return null; } diff --git a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java index 53f76f8ad42..3db02f91775 100644 --- a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java +++ b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java @@ -329,8 +329,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy } @Override - public ConsoleProxyInfo assignProxy(final long dataCenterId, final long vmId) { - ConsoleProxyVO proxy = doAssignProxy(dataCenterId, vmId); + public ConsoleProxyInfo assignProxy(final long dataCenterId, final VMInstanceVO userVm) { + ConsoleProxyVO proxy = doAssignProxy(dataCenterId, userVm); if (proxy == null) { return null; } @@ -355,14 +355,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy return info; } - public ConsoleProxyVO doAssignProxy(long dataCenterId, long vmId) { + public ConsoleProxyVO doAssignProxy(long dataCenterId, VMInstanceVO vm) { ConsoleProxyVO proxy = null; - VMInstanceVO vm = vmInstanceDao.findById(vmId); - - if (vm == null) { - logger.warn("VM " + vmId + " no longer exists, return a null proxy for vm:" + vmId); - return null; - } if (!availableVmStateOnAssignProxy.contains(vm.getState())) { if (logger.isInfoEnabled()) { @@ -379,17 +373,17 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy if (proxy != null) { if (!isInAssignableState(proxy)) { if (logger.isInfoEnabled()) { - logger.info("A previous assigned proxy is not assignable now, reassign console proxy for user vm : " + vmId); + 
logger.info("A previous assigned proxy is not assignable now, reassign console proxy for user vm : {}", vm); } proxy = null; } else { if (consoleProxyDao.getProxyActiveLoad(proxy.getId()) < capacityPerProxy || hasPreviousSession(proxy, vm)) { if (logger.isDebugEnabled()) { - logger.debug("Assign previous allocated console proxy for user vm : " + vmId); + logger.debug("Assign previous allocated console proxy for user vm: {}", vm); } if (proxy.getActiveSession() >= capacityPerProxy) { - logger.warn("Assign overloaded proxy to user VM as previous session exists, user vm : " + vmId); + logger.warn("Assign overloaded proxy to user VM as previous session exists, user vm: {}", vm); } } else { proxy = null; @@ -405,8 +399,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy allocProxyLock.unlock(); } } else { - logger.error("Unable to acquire synchronization lock to get/allocate proxy resource for vm :" + vmId + - ". Previous console proxy allocation is taking too long"); + logger.error("Unable to acquire synchronization lock to get/allocate proxy " + + "resource for vm: {}. 
Previous console proxy allocation is taking too long", vm); } if (proxy == null) { @@ -415,7 +409,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy } if (vm.getProxyId() == null || vm.getProxyId() != proxy.getId()) { - vmInstanceDao.updateProxyId(vmId, proxy.getId(), DateUtil.currentGMTTime()); + vmInstanceDao.updateProxyId(vm.getId(), proxy.getId(), DateUtil.currentGMTTime()); } proxy.setSslEnabled(sslEnabled); @@ -504,8 +498,9 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy public ConsoleProxyVO assignProxyFromRunningPool(long dataCenterId) { + DataCenterVO zone = dataCenterDao.findById(dataCenterId); if (logger.isDebugEnabled()) { - logger.debug("Assign console proxy from running pool for request from data center : " + dataCenterId); + logger.debug("Assign console proxy from running pool for request from data center: {}", zone); } ConsoleProxyAllocator allocator = getCurrentAllocator(); @@ -542,14 +537,14 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy Long allocated = allocator.allocProxy(runningList, loadInfo, dataCenterId); if (allocated == null) { - logger.debug(String.format("Console proxy not found, unable to assign console proxy from running pool for request from zone [%s].", dataCenterId)); + logger.debug("Console proxy not found, unable to assign console proxy from running pool for request from zone [{}].", zone); return null; } return consoleProxyDao.findById(allocated); } else { if (logger.isDebugEnabled()) { - logger.debug("Empty running proxy pool for now in data center : " + dataCenterId); + logger.debug("Empty running proxy pool for now in data center: {}", zone); } } @@ -807,8 +802,9 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy } private void allocCapacity(long dataCenterId) { + DataCenterVO zone = dataCenterDao.findById(dataCenterId); if (logger.isDebugEnabled()) { - 
logger.debug(String.format("Allocating console proxy standby capacity for zone [%s].", dataCenterId)); + logger.debug("Allocating console proxy standby capacity for zone [{}].", zone); } ConsoleProxyVO proxy = null; @@ -825,7 +821,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy try { proxy = startNew(dataCenterId); } catch (ConcurrentOperationException e) { - logger.warn(String.format("Unable to start new console proxy on zone [%s] due to [%s].", dataCenterId, e.getMessage()), e); + logger.warn("Unable to start new console proxy on zone [{}] due to [{}].", zone, e.getMessage(), e); } finally { allocProxyLock.unlock(); } @@ -836,7 +832,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy } } else { if (logger.isInfoEnabled()) { - logger.info("Found a stopped console proxy, starting it. Vm id : " + proxy.getId()); + logger.info("Found a stopped console proxy, starting it. VM: {}", proxy); } consoleProxyVmFromStoppedPool = true; } @@ -847,13 +843,13 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy if (proxy != null) { if (logger.isInfoEnabled()) { - logger.info("Console proxy " + proxy.getHostName() + " is started"); + logger.info("Console proxy {} is started", proxy); } SubscriptionMgr.getInstance().notifySubscribers(ConsoleProxyManager.ALERT_SUBJECT, this, new ConsoleProxyAlertEventArgs(ConsoleProxyAlertEventArgs.PROXY_UP, dataCenterId, proxy.getId(), proxy, null)); } else { if (logger.isInfoEnabled()) { - logger.info("Unable to start console proxy vm for standby capacity, vm id : " + proxyVmId + ", will recycle it and start a new one"); + logger.info("Unable to start console proxy vm for standby capacity, vm: {}, will recycle it and start a new one", proxy); } if (consoleProxyVmFromStoppedPool) { @@ -863,7 +859,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy } } catch (Exception e) { errorString = e.getMessage(); - 
logger.warn(String.format("Unable to allocate console proxy standby capacity for zone [%s] due to [%s].", dataCenterId, e.getMessage()), e); + logger.warn("Unable to allocate console proxy standby capacity for zone [{}] due to [{}].", zone, e.getMessage(), e); throw e; } finally { if (proxy == null || proxy.getState() != State.Running) @@ -872,20 +868,20 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy } } - public boolean isZoneReady(Map zoneHostInfoMap, long dataCenterId) { - List hosts = hostDao.listByDataCenterId(dataCenterId); + public boolean isZoneReady(Map zoneHostInfoMap, DataCenter dataCenter) { + List hosts = hostDao.listByDataCenterId(dataCenter.getId()); if (CollectionUtils.isEmpty(hosts)) { if (logger.isDebugEnabled()) { - logger.debug("Zone " + dataCenterId + " has no host available which is enabled and in Up state"); + logger.debug("Zone {} has no host available which is enabled and in Up state", dataCenter); } return false; } - ZoneHostInfo zoneHostInfo = zoneHostInfoMap.get(dataCenterId); + ZoneHostInfo zoneHostInfo = zoneHostInfoMap.get(dataCenter.getId()); if (zoneHostInfo != null && isZoneHostReady(zoneHostInfo)) { - VMTemplateVO template = vmTemplateDao.findSystemVMReadyTemplate(dataCenterId, HypervisorType.Any); + VMTemplateVO template = vmTemplateDao.findSystemVMReadyTemplate(dataCenter.getId(), HypervisorType.Any); if (template == null) { if (logger.isDebugEnabled()) { - logger.debug("System vm template is not ready at data center " + dataCenterId + ", wait until it is ready to launch console proxy vm"); + logger.debug("System vm template is not ready at data center {}, wait until it is ready to launch console proxy vm", dataCenter); } return false; } @@ -893,12 +889,12 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy if (template.isDirectDownload()) { templateHostRef = templateDataStoreDao.findByTemplate(template.getId(), DataStoreRole.Image); } else { - templateHostRef 
= templateDataStoreDao.findByTemplateZoneDownloadStatus(template.getId(), dataCenterId, Status.DOWNLOADED); + templateHostRef = templateDataStoreDao.findByTemplateZoneDownloadStatus(template.getId(), dataCenter.getId(), Status.DOWNLOADED); } if (templateHostRef != null) { - Boolean useLocalStorage = BooleanUtils.toBoolean(ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(dataCenterId)); - List> l = consoleProxyDao.getDatacenterStoragePoolHostInfo(dataCenterId, useLocalStorage); + Boolean useLocalStorage = BooleanUtils.toBoolean(ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(dataCenter.getId())); + List> l = consoleProxyDao.getDatacenterStoragePoolHostInfo(dataCenter.getId(), useLocalStorage); if (CollectionUtils.isNotEmpty(l) && l.get(0).second() > 0) { return true; } else { @@ -908,7 +904,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy } } else { if (logger.isDebugEnabled()) { - logger.debug(String.format("Zone [%s] is ready, but console proxy template [%s] is not ready on secondary storage.", dataCenterId, template.getId())); + logger.debug("Zone [{}] is ready, but console proxy template [{}] is not ready on secondary storage.", dataCenter, template); } } } @@ -1101,7 +1097,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy consoleProxyDao.remove(vmId); HostVO host = hostDao.findByTypeNameAndZoneId(proxy.getDataCenterId(), proxy.getHostName(), Host.Type.ConsoleProxy); if (host != null) { - logger.debug(String.format("Removing host [%s] entry for proxy [%s].", host.toString(), vmId)); + logger.debug("Removing host [{}] entry for proxy [{}].", host, proxy); return hostDao.remove(host.getId()); } @@ -1467,7 +1463,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy private void handleResetSuspending() { List runningProxies = consoleProxyDao.getProxyListInStates(State.Running); for (ConsoleProxyVO proxy : runningProxies) { - logger.info("Stop 
console proxy " + proxy.getId() + " because of we are currently in ResetSuspending management mode"); + logger.info("Stop console proxy {} because of we are currently in ResetSuspending management mode", proxy); stopProxy(proxy.getId()); } @@ -1509,9 +1505,10 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy @Override public boolean isPoolReadyForScan(Long dataCenterId) { - if (!isZoneReady(zoneHostInfoMap, dataCenterId)) { + DataCenterVO zone = dataCenterDao.findById(dataCenterId); + if (!isZoneReady(zoneHostInfoMap, zone)) { if (logger.isDebugEnabled()) { - logger.debug("Zone " + dataCenterId + " is not ready to launch console proxy yet"); + logger.debug("Zone {} is not ready to launch console proxy yet", zone); } return false; } @@ -1519,14 +1516,14 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy List l = consoleProxyDao.getProxyListInStates(VirtualMachine.State.Starting, VirtualMachine.State.Stopping); if (l.size() > 0) { if (logger.isDebugEnabled()) { - logger.debug("Zone " + dataCenterId + " has " + l.size() + " console proxy VM(s) in transition state"); + logger.debug("Zone {} has {} console proxy VM(s) in transition state", zone, l.size()); } return false; } if (logger.isDebugEnabled()) { - logger.debug("Zone " + dataCenterId + " is ready to launch console proxy"); + logger.debug("Zone {} is ready to launch console proxy", zone); } return true; } diff --git a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyService.java b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyService.java index 4e0d14d8835..55b366bfae6 100644 --- a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyService.java +++ b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyService.java @@ -17,9 +17,10 @@ package com.cloud.consoleproxy; import com.cloud.info.ConsoleProxyInfo; +import com.cloud.vm.VMInstanceVO; public interface ConsoleProxyService { - public abstract ConsoleProxyInfo 
assignProxy(long dataCenterId, long userVmId); + public abstract ConsoleProxyInfo assignProxy(long dataCenterId, VMInstanceVO userVm); } diff --git a/server/src/main/java/com/cloud/consoleproxy/StaticConsoleProxyManager.java b/server/src/main/java/com/cloud/consoleproxy/StaticConsoleProxyManager.java index 29a7497fc17..558bff3fdc8 100644 --- a/server/src/main/java/com/cloud/consoleproxy/StaticConsoleProxyManager.java +++ b/server/src/main/java/com/cloud/consoleproxy/StaticConsoleProxyManager.java @@ -60,7 +60,7 @@ public class StaticConsoleProxyManager extends AgentBasedConsoleProxyManager imp } @Override - public ConsoleProxyInfo assignProxy(long dataCenterId, long userVmId) { + public ConsoleProxyInfo assignProxy(long dataCenterId, VMInstanceVO userVm) { return new ConsoleProxyInfo(_sslEnabled, _ip, _consoleProxyPort, _consoleProxyUrlPort, _consoleProxyUrlDomain); } diff --git a/server/src/main/java/com/cloud/dc/DedicatedResourceVO.java b/server/src/main/java/com/cloud/dc/DedicatedResourceVO.java index 79c563ced6e..3324bf62041 100644 --- a/server/src/main/java/com/cloud/dc/DedicatedResourceVO.java +++ b/server/src/main/java/com/cloud/dc/DedicatedResourceVO.java @@ -26,6 +26,7 @@ import javax.persistence.Id; import javax.persistence.Table; import com.cloud.utils.NumbersUtil; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "dedicated_resources") @@ -170,4 +171,11 @@ public class DedicatedResourceVO implements DedicatedResources { public int hashCode() { return NumbersUtil.hash(id); } + + @Override + public String toString() { + return String.format("DedicatedResource %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid")); + } } diff --git a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java index cf202564a99..a3c889cd070 100644 --- 
a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -49,7 +49,6 @@ import com.cloud.utils.fsm.StateMachine2; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; -import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; @@ -296,8 +295,7 @@ StateListener, Configurable { return; } final Long lastHostClusterId = lastHost.getClusterId(); - logger.warn(String.format("VM last host ID: %d belongs to zone ID: %s for which config - %s is false and storage migration would be needed for inter-cluster migration, therefore, adding all other clusters except ID: %d from this zone to avoid list", - lastHost.getId(), vm.getDataCenterId(), ConfigurationManagerImpl.MIGRATE_VM_ACROSS_CLUSTERS.key(), lastHostClusterId)); + logger.warn("VM last host ID: {} belongs to zone ID: {} for which config - {} is false and storage migration would be needed for inter-cluster migration, therefore, adding all other clusters except ID: {} from this zone to avoid list", lastHost, vm.getDataCenterId(), ConfigurationManagerImpl.MIGRATE_VM_ACROSS_CLUSTERS.key(), lastHostClusterId); List clusterIds = _clusterDao.listAllClusters(lastHost.getDataCenterId()); Set existingAvoidedClusters = avoids.getClustersToAvoid(); clusterIds = clusterIds.stream().filter(x -> !Objects.equals(x, lastHostClusterId) && (existingAvoidedClusters == null || !existingAvoidedClusters.contains(x))).collect(Collectors.toList()); @@ -317,20 +315,18 @@ StateListener, Configurable { boolean volumesRequireEncryption = anyVolumeRequiresEncryption(_volsDao.findByInstance(vm.getId())); if (vm.getType() == VirtualMachine.Type.User || vm.getType() == VirtualMachine.Type.DomainRouter) { - 
logger.debug("Checking non dedicated resources to deploy VM [{}].", () -> ReflectionToStringBuilderUtils.reflectOnlySelectedFields(vm, "uuid", "type", "instanceName")); + logger.debug("Checking non dedicated resources to deploy VM [{}].", vm); checkForNonDedicatedResources(vmProfile, dc, avoids); } - logger.debug(() -> { - String datacenter = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(dc, "uuid", "name"); - String podVO = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(_podDao.findById(plan.getPodId()), "uuid", "name"); - String clusterVO = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(_clusterDao.findById(plan.getClusterId()), "uuid", "name"); - String vmDetails = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(vm, "uuid", "type", "instanceName"); - return String.format("Trying to allocate a host and storage pools from datacenter [%s], pod [%s], cluster [%s], to deploy VM [%s] " - + "with requested CPU [%s] and requested RAM [%s].", datacenter, podVO, clusterVO, vmDetails, cpuRequested, toHumanReadableSize(ramRequested)); - }); + logger.debug("Trying to allocate a host and storage pools from datacenter [{}], " + + "pod [{}], cluster [{}], to deploy VM [{}] with requested CPU [{}] and requested RAM [{}].", + dc::toString, () -> _podDao.findById(plan.getPodId()), () -> _clusterDao.findById(plan.getClusterId()), + vm::toString, () -> cpuRequested, () -> toHumanReadableSize(ramRequested)); - logger.debug("ROOT volume [{}] {} to deploy VM [{}].", () -> getRootVolumeUuid(_volsDao.findByInstance(vm.getId())), () -> plan.getPoolId() != null ? "is ready" : "is not ready", vm::getUuid); + logger.debug("ROOT volume [{}] {} to deploy VM [{}].", + getRootVolume(_volsDao.findByInstance(vm.getId())), + plan.getPoolId() != null ? 
"is ready" : "is not ready", vm); avoidDisabledResources(vmProfile, dc, avoids); avoidDifferentArchResources(vmProfile, dc, avoids); @@ -351,8 +347,9 @@ StateListener, Configurable { } } logger.debug("DeploymentPlan [{}] has not specified host. Trying to find another destination to deploy VM [{}], avoiding pods [{}], clusters [{}] and hosts [{}].", - () -> plan.getClass().getSimpleName(), vmProfile::getUuid, () -> StringUtils.join(avoids.getPodsToAvoid(), ", "), () -> StringUtils.join(avoids.getClustersToAvoid(), ", "), - () -> StringUtils.join(avoids.getHostsToAvoid(), ", ")); + plan.getClass().getSimpleName(), vmProfile, StringUtils.join(avoids.getPodsToAvoid(), ", "), + StringUtils.join(avoids.getClustersToAvoid(), ", "), + StringUtils.join(avoids.getHostsToAvoid(), ", ")); logger.debug("Deploy avoids pods: {}, clusters: {}, hosts: {}.", avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid()); @@ -364,7 +361,7 @@ StateListener, Configurable { // check if datacenter is in avoid set if (avoids.shouldAvoid(dc)) { if (logger.isDebugEnabled()) { - logger.debug("DataCenter id = '" + dc.getId() + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning."); + logger.debug("DataCenter = '" + dc + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning."); } return null; } @@ -387,9 +384,9 @@ StateListener, Configurable { boolean considerLastHost = vm.getLastHostId() != null && haVmTag == null && (considerLastHostStr == null || Boolean.TRUE.toString().equalsIgnoreCase(considerLastHostStr)); if (considerLastHost) { - logger.debug("This VM has last host_id specified, trying to choose the same host: " + vm.getLastHostId()); - HostVO host = _hostDao.findById(vm.getLastHostId()); + HostVO host = _hostDao.findById(vm.getLastHostId()); + logger.debug("This VM has last host_id specified, trying to choose the same host: " + host); lastHost = host; DeployDestination deployDestination = deployInVmLastHost(vmProfile, plan, avoids, planner, vm, dc, offering, 
cpuRequested, ramRequested, volumesRequireEncryption); @@ -437,13 +433,13 @@ StateListener, Configurable { avoids.addHost(dest.getHost().getId()); if (volumesRequireEncryption && !Boolean.parseBoolean(_hostDetailsDao.findDetail(hostId, Host.HOST_VOLUME_ENCRYPTION).getValue())) { - logger.warn(String.format("VM's volumes require encryption support, and the planner-provided host %s can't handle it", dest.getHost())); + logger.warn("VM's volumes require encryption support, and the planner-provided host {} can't handle it", dest.getHost()); continue; } else { - logger.debug(String.format("VM's volume encryption requirements are met by host %s", dest.getHost())); + logger.debug("VM's volume encryption requirements are met by host {}", dest.getHost()); } - if (checkIfHostFitsPlannerUsage(hostId, DeploymentPlanner.PlannerResourceUsage.Shared)) { + if (checkIfHostFitsPlannerUsage(dest.getHost(), DeploymentPlanner.PlannerResourceUsage.Shared)) { // found destination return dest; } else { @@ -485,7 +481,7 @@ StateListener, Configurable { _hostDao.loadDetails(host); if (host.getStatus() != Status.Up) { logger.debug("Cannot deploy VM [{}] to the last host [{}] because this host is not in UP state or is not enabled. 
Host current status [{}] and resource status [{}].", - vm.getUuid(), host.getUuid(), host.getState().name(), host.getResourceState()); + vm, host, host.getState().name(), host.getResourceState()); return null; } if (checkVmProfileAndHost(vmProfile, host)) { @@ -500,23 +496,24 @@ StateListener, Configurable { if (hostHasCpuCapability) { // first check from reserved capacity - hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpuRequested, ramRequested, true, cpuOvercommitRatio, memoryOvercommitRatio, true); + hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host, cpuRequested, ramRequested, true, cpuOvercommitRatio, memoryOvercommitRatio, true); // if not reserved, check the free capacity if (!hostHasCapacity) - hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpuRequested, ramRequested, false, cpuOvercommitRatio, memoryOvercommitRatio, true); + hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host, cpuRequested, ramRequested, false, cpuOvercommitRatio, memoryOvercommitRatio, true); } boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile); if (!hostHasCapacity || !hostHasCpuCapability) { - logger.debug("Cannot deploy VM [{}] to the last host [{}] because this host does not have enough capacity to deploy this VM.", vm.getUuid(), host.getUuid()); + logger.debug("Cannot deploy VM [{}] to the last host [{}] because this host does not have enough capacity to deploy this VM.", vm, host); return null; } - logger.debug("Last host [{}] of VM [{}] is UP and has enough capacity. Checking for suitable pools for this host under zone [{}], pod [{}] and cluster [{}].", - host.getUuid(), vm.getUuid(), host.getDataCenterId(), host.getPodId(), host.getClusterId()); - Pod pod = _podDao.findById(host.getPodId()); Cluster cluster = _clusterDao.findById(host.getClusterId()); + + logger.debug("Last host [{}] of VM [{}] is UP and has enough capacity. 
Checking for suitable pools for this host under zone [{}], pod [{}] and cluster [{}].", + host, vm, dc, pod, cluster); + if (vm.getHypervisorType() == HypervisorType.BareMetal) { DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<>(), displayStorage); logger.debug("Returning Deployment Destination: {}.", dest); @@ -536,7 +533,7 @@ StateListener, Configurable { // choose the potential pool for this VM for this // host if (suitableVolumeStoragePools.isEmpty()) { - logger.debug("Cannot find suitable storage pools in host [{}] to deploy VM [{}]", host.getUuid(), vm.getUuid()); + logger.debug("Cannot find suitable storage pools in host [{}] to deploy VM [{}]", host, vm); return null; } List suitableHosts = new ArrayList<>(); @@ -565,32 +562,31 @@ StateListener, Configurable { private boolean canUseLastHost(HostVO host, ExcludeList avoids, DeploymentPlan plan, VirtualMachine vm, ServiceOffering offering, boolean volumesRequireEncryption) { if (host == null) { - logger.warn("Could not find last host of VM [{}] with id [{}]. Skipping this and trying other available hosts.", vm.getUuid(), vm.getLastHostId()); + logger.warn("Could not find last host of VM [{}] with id [{}]. Skipping this and trying other available hosts.", vm, vm.getLastHostId()); return false; } if (avoids.shouldAvoid(host)) { - logger.warn("The last host [{}] of VM [{}] is in the avoid set. Skipping this and trying other available hosts.", host.getUuid(), vm.getUuid()); + logger.warn("The last host [{}] of VM [{}] is in the avoid set. Skipping this and trying other available hosts.", host, vm); return false; } if (plan.getClusterId() != null && host.getClusterId() != null && !plan.getClusterId().equals(host.getClusterId())) { - logger.debug(() -> String.format("The last host [%s] of VM [%s] cannot be picked, as the plan [%s] specifies a different cluster [%s] to deploy this VM. 
Skipping this and trying other available hosts.", - ReflectionToStringBuilderUtils.reflectOnlySelectedFields(host, "uuid", "clusterId"), vm.getUuid(), plan.getClass().getSimpleName(), plan.getClusterId())); + logger.debug("The last host [{}] of VM [{}] cannot be picked, as the plan [{}] specifies a different cluster [{}] to deploy this VM. Skipping this and trying other available hosts.", host, vm, plan.getClass().getSimpleName(), plan.getClusterId()); return false; } if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) { logger.debug("Cannot deploy VM [{}] in the last host [{}] because this host already has the max number of running VMs (users and system VMs). Skipping this and trying other available hosts.", - vm.getUuid(), host.getUuid()); + vm, host); return false; } ServiceOfferingDetailsVO offeringDetails = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString()); ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.pciDevice.toString()); - if (offeringDetails != null && !_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())) { + if (offeringDetails != null && !_resourceMgr.isGPUDeviceAvailable(host, groupName.getValue(), offeringDetails.getValue())) { logger.debug("Cannot deploy VM [{}] in the last host [{}] because this host does not have the required GPU devices available. Skipping this and trying other available hosts.", - vm.getUuid(), host.getUuid()); + vm, host); return false; } @@ -605,31 +601,31 @@ StateListener, Configurable { DeploymentPlanner planner, VirtualMachine vm, DataCenter dc, String uefiFlag) throws InsufficientServerCapacityException { Long hostIdSpecified = plan.getHostId(); - logger.debug("DeploymentPlan [{}] has specified host [{}] without HA flag. 
Choosing this host to deploy VM [{}].", plan.getClass().getSimpleName(), hostIdSpecified, vm.getUuid()); + logger.debug("DeploymentPlan [{}] has specified host [{}] without HA flag. Choosing this host to deploy VM [{}].", plan.getClass().getSimpleName(), hostIdSpecified, vm); HostVO host = _hostDao.findById(hostIdSpecified); if (host != null && StringUtils.isNotBlank(uefiFlag) && "yes".equalsIgnoreCase(uefiFlag)) { _hostDao.loadDetails(host); if (MapUtils.isNotEmpty(host.getDetails()) && host.getDetails().containsKey(Host.HOST_UEFI_ENABLE) && "false".equalsIgnoreCase(host.getDetails().get(Host.HOST_UEFI_ENABLE))) { - logger.debug("Cannot deploy VM [{}] to specified host [{}] because this host does not support UEFI VM deployment, returning.", vm.getUuid(), host.getUuid()); + logger.debug("Cannot deploy VM [{}] to specified host [{}] because this host does not support UEFI VM deployment, returning.", vm, host); return null; } } if (host == null) { - logger.debug("Cannot deploy VM [{}] to host [{}] because this host cannot be found.", vm.getUuid(), hostIdSpecified); + logger.debug("Cannot deploy VM [{}] to host [{}] because this host cannot be found.", vm, hostIdSpecified); return null; } if (avoids.shouldAvoid(host)) { - logger.debug("Cannot deploy VM [{}] to host [{}] because this host is in the avoid set.", vm.getUuid(), host.getUuid()); + logger.debug("Cannot deploy VM [{}] to host [{}] because this host is in the avoid set.", vm, host); return null; } - logger.debug("Trying to find suitable pools for host [{}] under pod [{}], cluster [{}] and zone [{}], to deploy VM [{}].", - host.getUuid(), host.getDataCenterId(), host.getPodId(), host.getClusterId(), vm.getUuid()); - Pod pod = _podDao.findById(host.getPodId()); Cluster cluster = _clusterDao.findById(host.getClusterId()); + logger.debug("Trying to find suitable pools for host [{}] under pod [{}], cluster [{}] and zone [{}], to deploy VM [{}].", + host, dc, pod, cluster, vm); + boolean displayStorage = 
getDisplayStorageFromVmProfile(vmProfile); if (vm.getHypervisorType() == HypervisorType.BareMetal) { DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<>(), @@ -653,8 +649,6 @@ StateListener, Configurable { suitableVolumeStoragePools, avoids, getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes, plan.getPreferredHosts(), vm); if (potentialResources != null) { - pod = _podDao.findById(host.getPodId()); - cluster = _clusterDao.findById(host.getClusterId()); Map storageVolMap = potentialResources.second(); for (Volume vol : readyAndReusedVolumes) { storageVolMap.remove(vol); @@ -664,14 +658,14 @@ StateListener, Configurable { return dest; } } - logger.debug("Cannot deploy VM [{}] under host [{}], because no suitable pools were found.", vmProfile.getUuid(), host.getUuid()); + logger.debug("Cannot deploy VM [{}] under host [{}], because no suitable pools were found.", vmProfile, host); return null; } - protected String getRootVolumeUuid(List volumes) { + protected Volume getRootVolume(List volumes) { for (Volume volume : volumes) { if (volume.getVolumeType() == Volume.Type.ROOT) { - return volume.getUuid(); + return volume; } } return null; @@ -743,8 +737,8 @@ StateListener, Configurable { */ protected void avoidDisabledHosts(DataCenter dc, ExcludeList avoids) { List disabledHosts = _hostDao.listDisabledByDataCenterId(dc.getId()); - logger.debug(() -> String.format("Adding hosts [%s] of datacenter [%s] to the avoid set, because these hosts are in the Disabled state.", - disabledHosts.stream().map(HostVO::getUuid).collect(Collectors.joining(", ")), dc.getUuid())); + logger.debug("Adding hosts [{}] of datacenter [{}] to the avoid set, because these hosts are in the Disabled state.", + disabledHosts.stream().map(HostVO::getUuid).collect(Collectors.joining(", ")), dc); for (HostVO host : disabledHosts) { avoids.addHost(host.getId()); } @@ -757,7 +751,8 @@ StateListener, Configurable { List pods = 
_podDao.listAllPods(dc.getId()); for (Long podId : pods) { List disabledClusters = _clusterDao.listDisabledClusters(dc.getId(), podId); - logger.debug(() -> String.format("Adding clusters [%s] of pod [%s] to the void set because these clusters are in the Disabled state.", StringUtils.join(disabledClusters, ", "), podId)); + logger.debug("Adding clusters [{}] of pod [{}] to the avoid set because these clusters are in the Disabled state.", + StringUtils.join(disabledClusters, ", "), podId); avoids.addClusterList(disabledClusters); } } @@ -767,7 +762,7 @@ StateListener, Configurable { */ protected void avoidDisabledPods(DataCenter dc, ExcludeList avoids) { List disabledPods = _podDao.listDisabledPods(dc.getId()); - logger.debug(() -> String.format("Adding pods [%s] to the avoid set because these pods are in the Disabled state.", StringUtils.join(disabledPods, ", "))); + logger.debug("Adding pods [{}] to the avoid set because these pods are in the Disabled state.", StringUtils.join(disabledPods, ", ")); avoids.addPodList(disabledPods); } @@ -776,7 +771,7 @@ StateListener, Configurable { */ protected void avoidDisabledDataCenters(DataCenter dc, ExcludeList avoids) { if (dc.getAllocationState() == Grouping.AllocationState.Disabled) { - logger.debug("Adding datacenter [{}] to the avoid set because this datacenter is in Disabled state.", dc.getUuid()); + logger.debug("Adding datacenter [{}] to the avoid set because this datacenter is in Disabled state.", dc); avoids.addDataCenter(dc.getId()); } } @@ -831,7 +826,7 @@ StateListener, Configurable { long accountDomainId = vmProfile.getOwner().getDomainId(); long accountId = vmProfile.getOwner().getAccountId(); logger.debug("Zone [{}] is dedicated. 
Checking if account [{}] in domain [{}] can use this zone to deploy VM [{}].", - dedicatedZone.getUuid(), accountId, accountDomainId, vmProfile.getUuid()); + dedicatedZone.getUuid(), accountId, accountDomainId, vmProfile); // If a zone is dedicated to an account then all hosts in this zone // will be explicitly dedicated to @@ -842,14 +837,14 @@ if (dedicatedZone.getAccountId().equals(accountId)) { return; } else { - throw new CloudRuntimeException("Failed to deploy VM, Zone " + dc.getName() + " not available for the user account " + vmProfile.getOwner()); + throw new CloudRuntimeException("Failed to deploy VM, Zone " + dc + " not available for the user account " + vmProfile.getOwner()); } } // if zone is dedicated to a domain. Check owner's access to the // domain level dedication group if (!_affinityGroupService.isAffinityGroupAvailableInDomain(dedicatedZone.getAffinityGroupId(), accountDomainId)) { - throw new CloudRuntimeException("Failed to deploy VM, Zone " + dc.getName() + " not available for the user domain " + vmProfile.getOwner()); + throw new CloudRuntimeException("Failed to deploy VM, Zone " + dc + " not available for the user domain " + vmProfile.getOwner()); } } @@ -954,7 +949,7 @@ StateListener, Configurable { } logger.debug(() -> LogUtils.logGsonWithoutException("Adding pods [%s], clusters [%s] and hosts [%s] to the avoid list in the deploy process of VR VM [%s], " - + "because this VM is not dedicated to this components.", allPodsInDc, allClustersInDc, allHostsInDc, vm.getUuid())); + + "because this VM is not dedicated to these components.", allPodsInDc, allClustersInDc, allHostsInDc, vm)); avoids.addPodList(allPodsInDc); avoids.addClusterList(allClustersInDc); avoids.addHostList(allHostsInDc); @@ -962,7 +957,7 @@ StateListener, Configurable { private void findAvoidSetForNonExplicitUserVM(ExcludeList avoids, VirtualMachine vm, List allPodsInDc, List allClustersInDc, List allHostsInDc) { logger.debug(() -> 
LogUtils.logGsonWithoutException("Adding pods [%s], clusters [%s] and hosts [%s] to the avoid list in the deploy process of user VM [%s], " - + "because this VM is not explicitly dedicated to these components.", allPodsInDc, allClustersInDc, allHostsInDc, vm.getUuid())); + + "because this VM is not explicitly dedicated to these components.", allPodsInDc, allClustersInDc, allHostsInDc, vm)); avoids.addPodList(allPodsInDc); avoids.addClusterList(allClustersInDc); avoids.addHostList(allHostsInDc); @@ -997,14 +992,14 @@ StateListener, Configurable { } @DB - protected boolean checkIfHostFitsPlannerUsage(final long hostId, final PlannerResourceUsage resourceUsageRequired) { + protected boolean checkIfHostFitsPlannerUsage(final Host host, final PlannerResourceUsage resourceUsageRequired) { // TODO Auto-generated method stub // check if this host has been picked up by some other planner // exclusively // if planner can work with shared host, check if this host has // been marked as 'shared' // else if planner needs dedicated host, - PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(hostId); + PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(host.getId()); if (reservationEntry != null) { final long id = reservationEntry.getId(); PlannerResourceUsage hostResourceType = reservationEntry.getResourceUsage(); @@ -1026,7 +1021,7 @@ StateListener, Configurable { public Boolean doInTransaction(TransactionStatus status) { final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true); if (lockedEntry == null) { - logger.error("Unable to lock the host entry for reservation, host: " + hostId); + logger.error("Unable to lock the host entry for reservation, host: {}", host); return false; } // check before updating @@ -1055,22 +1050,22 @@ StateListener, Configurable { } @DB - public boolean checkHostReservationRelease(final Long hostId) { + public boolean checkHostReservationRelease(final Host host) { - 
if (hostId != null) { - PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(hostId); + if (host != null) { + PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(host.getId()); if (reservationEntry != null && reservationEntry.getResourceUsage() != null) { // check if any VMs are starting or running on this host - List vms = _vmInstanceDao.listUpByHostId(hostId); + List vms = _vmInstanceDao.listUpByHostId(host.getId()); if (vms.size() > 0) { if (logger.isDebugEnabled()) { - logger.debug("Cannot release reservation, Found " + vms.size() + " VMs Running on host " + hostId); + logger.debug("Cannot release reservation, Found {} VMs Running on host {}", vms.size(), host); } return false; } - List vmsByLastHostId = _vmInstanceDao.listByLastHostId(hostId); + List vmsByLastHostId = _vmInstanceDao.listByLastHostId(host.getId()); if (vmsByLastHostId.size() > 0) { // check if any VMs are within skip.counting.hours, if yes // we @@ -1079,7 +1074,7 @@ StateListener, Configurable { long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - stoppedVM.getUpdateTime().getTime()) / 1000; if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) { if (logger.isDebugEnabled()) { - logger.debug("Cannot release reservation, Found VM: " + stoppedVM + " Stopped but reserved on host " + hostId); + logger.debug("Cannot release reservation, Found VM: {} Stopped but reserved on host {}", stoppedVM, host); } return false; } @@ -1087,10 +1082,10 @@ StateListener, Configurable { } // check if any VMs are stopping on or migrating to this host - List vmsStoppingMigratingByHostId = _vmInstanceDao.findByHostInStates(hostId, State.Stopping, State.Migrating, State.Starting); + List vmsStoppingMigratingByHostId = _vmInstanceDao.findByHostInStates(host.getId(), State.Stopping, State.Migrating, State.Starting); if (vmsStoppingMigratingByHostId.size() > 0) { if (logger.isDebugEnabled()) { - logger.debug("Cannot release reservation, Found " + 
vmsStoppingMigratingByHostId.size() + " VMs stopping/migrating/starting on host " + hostId); + logger.debug("Cannot release reservation, Found {} VMs stopping/migrating/starting on host {}", vmsStoppingMigratingByHostId.size(), host); } return false; } @@ -1108,7 +1103,7 @@ StateListener, Configurable { } if (logger.isDebugEnabled()) { - logger.debug("Host has no VMs associated, releasing the planner reservation for host " + hostId); + logger.debug("Host has no VMs associated, releasing the planner reservation for host {}", host); } final long id = reservationEntry.getId(); @@ -1118,7 +1113,7 @@ StateListener, Configurable { public Boolean doInTransaction(TransactionStatus status) { final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true); if (lockedEntry == null) { - logger.error("Unable to lock the host entry for reservation, host: " + hostId); + logger.error("Unable to lock the host entry for reservation, host: {}", host); return false; } // check before updating @@ -1156,7 +1151,7 @@ StateListener, Configurable { for (PlannerHostReservationVO hostReservation : reservedHosts) { HostVO host = _hostDao.findById(hostReservation.getHostId()); if (host != null && host.getManagementServerId() != null && host.getManagementServerId() == _nodeId) { - checkHostReservationRelease(hostReservation.getHostId()); + checkHostReservationRelease(host); } } @@ -1239,10 +1234,9 @@ StateListener, Configurable { @Override public void onPublishMessage(String senderAddress, String subject, Object obj) { VMInstanceVO vm = ((VMInstanceVO)obj); - logger.debug("MessageBus message: host reserved capacity released for VM: " + vm.getLastHostId() + - ", checking if host reservation can be released for host:" + vm.getLastHostId()); - Long hostId = vm.getLastHostId(); - checkHostReservationRelease(hostId); + Host host = _hostDao.findById(vm.getLastHostId()); + logger.debug("MessageBus message: host reserved capacity released for VM: {}, checking if host reservation 
can be released for host:{}", vm, host); + checkHostReservationRelease(host); } }); @@ -1308,21 +1302,21 @@ StateListener, Configurable { if (clusterVO.getHypervisorType() != vmProfile.getHypervisorType()) { logger.debug("Adding cluster [{}] to the avoid set because the cluster's hypervisor [{}] does not match the VM [{}] hypervisor: [{}]. Skipping this cluster.", - clusterVO.getUuid(), clusterVO.getHypervisorType().name(), vmProfile.getUuid(), vmProfile.getHypervisorType().name()); + clusterVO, clusterVO.getHypervisorType().name(), vmProfile, vmProfile.getHypervisorType().name()); avoid.addCluster(clusterVO.getId()); continue; } - logger.debug("Checking resources in Cluster: " + clusterId + " under Pod: " + clusterVO.getPodId()); + Pod pod = _podDao.findById(clusterVO.getPodId()); + logger.debug("Checking resources in Cluster: " + clusterVO + " under Pod: " + pod); // search for resources(hosts and storage) under this zone, pod, // cluster. DataCenterDeployment potentialPlan = new DataCenterDeployment(plan.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null, plan.getPoolId(), null, plan.getReservationContext()); potentialPlan.setHostPriorities(plan.getHostPriorities()); - Pod pod = _podDao.findById(clusterVO.getPodId()); if (CollectionUtils.isNotEmpty(avoid.getPodsToAvoid()) && avoid.getPodsToAvoid().contains(pod.getId())) { - logger.debug("The cluster is in a disabled pod : " + pod.getId()); + logger.debug("The cluster is in a disabled pod : " + pod); } else { // find suitable hosts under this cluster, need as many hosts as we // get. 
@@ -1357,10 +1351,10 @@ StateListener, Configurable { return dest; } } else { - logger.debug("No suitable storagePools found under this Cluster: " + clusterId); + logger.debug("No suitable storagePools found under this Cluster: " + clusterVO); } } else { - logger.debug("No suitable hosts found under this Cluster: " + clusterId); + logger.debug("No suitable hosts found under this Cluster: " + clusterVO); } } @@ -1517,7 +1511,7 @@ StateListener, Configurable { } else { for (StoragePool pool : pools) { if (!suitablePools.contains(pool)) { - logger.debug("Storage pool " + pool.getUuid() + " not allowed for this VM"); + logger.debug("Storage pool " + pool + " not allowed for this VM"); notAllowedPools.add(pool); } } @@ -1546,7 +1540,7 @@ StateListener, Configurable { continue; } } catch (StorageUnavailableException e) { - logger.warn(String.format("Could not verify storage policy complaince against storage pool %s due to exception %s", storagePool.getUuid(), e.getMessage())); + logger.warn("Could not verify storage policy compliance against storage pool {} due to exception {}", storagePool, e.getMessage()); continue; } haveEnoughSpace = true; @@ -1554,7 +1548,7 @@ StateListener, Configurable { } if (hostCanAccessPool && haveEnoughSpace && hostAffinityCheck) { for (Volume vol : volumesOrderBySizeDesc) { - logger.debug("Found a suitable storage pool for all the VM volumes: " + storagePool.getUuid()); + logger.debug("Found a suitable storage pool for all the VM volumes: {}", storagePool); storage.put(vol, storagePool); } break; @@ -1563,7 +1557,7 @@ StateListener, Configurable { } else { for (Volume vol : volumesOrderBySizeDesc) { haveEnoughSpace = false; - logger.debug("Checking if host: " + potentialHost.getId() + " can access any suitable storage pool for volume: " + vol.getVolumeType()); + logger.debug("Checking if host: {} can access any suitable storage pool for volume: {}", potentialHost, vol.getVolumeType()); List volumePoolList = 
suitableVolumeStoragePools.get(vol); hostCanAccessPool = false; hostAffinityCheck = checkAffinity(potentialHost, preferredHosts); @@ -1585,7 +1579,7 @@ StateListener, Configurable { continue; } } catch (StorageUnavailableException e) { - logger.warn(String.format("Could not verify storage policy complaince against storage pool %s due to exception %s", potentialSPool.getUuid(), e.getMessage())); + logger.warn("Could not verify storage policy compliance against storage pool {} due to exception {}", potentialSPool, e.getMessage()); continue; } } @@ -1619,19 +1613,18 @@ StateListener, Configurable { boolean hostHasEncryption = Boolean.parseBoolean(potentialHostVO.getDetail(Host.HOST_VOLUME_ENCRYPTION)); boolean hostMeetsEncryptionRequirements = !anyVolumeRequiresEncryption(new ArrayList<>(volumesOrderBySizeDesc)) || hostHasEncryption; - boolean hostFitsPlannerUsage = checkIfHostFitsPlannerUsage(potentialHost.getId(), resourceUsageRequired); + boolean hostFitsPlannerUsage = checkIfHostFitsPlannerUsage(potentialHost, resourceUsageRequired); if (hostCanAccessPool && haveEnoughSpace && hostAffinityCheck && hostMeetsEncryptionRequirements && hostFitsPlannerUsage) { - logger.debug("Found a potential host " + "id: " + potentialHost.getId() + " name: " + potentialHost.getName() + - " and associated storage pools for this VM"); + logger.debug("Found a potential host {} and associated storage pools for this VM", potentialHost); volumeAllocationMap.clear(); return new Pair<>(potentialHost, storage); } else { logger.debug("Adding host [{}] to the avoid set because: can access Pool [{}], has enough space [{}], affinity check [{}], fits planner [{}] usage [{}].", - potentialHost.getUuid(), hostCanAccessPool, haveEnoughSpace, hostAffinityCheck, resourceUsageRequired.getClass().getSimpleName(), hostFitsPlannerUsage); + potentialHost, hostCanAccessPool, haveEnoughSpace, hostAffinityCheck, resourceUsageRequired.getClass().getSimpleName(), hostFitsPlannerUsage); if 
(!hostMeetsEncryptionRequirements) { - logger.debug("Potential host " + potentialHost + " did not meet encryption requirements of all volumes"); + logger.debug("Potential host {} did not meet encryption requirements of all volumes", potentialHost); } avoid.addHost(potentialHost.getId()); } @@ -1672,13 +1665,13 @@ StateListener, Configurable { hostCanAccessSPool = true; } - logger.debug("Host: " + host.getId() + (hostCanAccessSPool ? " can" : " cannot") + " access pool: " + pool.getId()); + logger.debug("Host: {}{} access pool: {}", host, hostCanAccessSPool ? " can" : " cannot", pool); if (!hostCanAccessSPool) { if (_storageMgr.canHostPrepareStoragePoolAccess(host, pool)) { - logger.debug("Host: " + host.getId() + " can prepare access to pool: " + pool.getId()); + logger.debug("Host: {} can prepare access to pool: {}", host, pool); hostCanAccessSPool = true; } else { - logger.debug("Host: " + host.getId() + " cannot prepare access to pool: " + pool.getId()); + logger.debug("Host: {} cannot prepare access to pool: {}", host, pool); } } @@ -1705,7 +1698,7 @@ StateListener, Configurable { @Override public void reorderHostsByPriority(Map priorities, List hosts) { - logger.info("Re-ordering hosts " + hosts + " by priorities " + priorities); + logger.info("Re-ordering hosts {} by priorities {}", hosts, priorities); hosts.removeIf(host -> DataCenterDeployment.PROHIBITED_HOST_PRIORITY.equals(getHostPriority(priorities, host.getId()))); @@ -1718,7 +1711,7 @@ StateListener, Configurable { } ); - logger.info("Hosts after re-ordering are: " + hosts); + logger.info("Hosts after re-ordering are: {}", hosts); } private Integer getHostPriority(Map priorities, Long hostId) { @@ -1751,16 +1744,15 @@ StateListener, Configurable { Set poolsToAvoidOutput = new HashSet<>(originalAvoidPoolSet); for (VolumeVO toBeCreated : volumesTobeCreated) { - logger.debug("Checking suitable pools for volume [{}, {}] of VM [{}].", toBeCreated.getUuid(), toBeCreated.getVolumeType().name(), 
vmProfile.getUuid()); + logger.debug("Checking suitable pools for volume [{}, {}] of VM [{}].", toBeCreated, toBeCreated.getVolumeType().name(), vmProfile); if (toBeCreated.getState() == Volume.State.Allocated && toBeCreated.getPoolId() != null) { toBeCreated.setPoolId(null); if (!_volsDao.update(toBeCreated.getId(), toBeCreated)) { - throw new CloudRuntimeException(String.format("Error updating volume [%s] to clear pool Id.", toBeCreated.getId())); + throw new CloudRuntimeException(String.format("Error updating volume [%s] to clear pool Id.", toBeCreated)); } if (logger.isDebugEnabled()) { - String msg = String.format("Setting pool_id to NULL for volume id=%s as it is in Allocated state", toBeCreated.getId()); - logger.debug(msg); + logger.debug("Setting pool_id to NULL for volume id={} as it is in Allocated state", toBeCreated); } } // If the plan specifies a poolId, it means that this VM's ROOT @@ -1773,8 +1765,8 @@ StateListener, Configurable { } if (!isRootAdmin(vmProfile) && !isEnabledForAllocation(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId())) { - logger.debug(String.format("Cannot find new storage pool to deploy volume [{}] of VM [{}] in cluster [{}] because allocation state is disabled. Returning.", - toBeCreated.getUuid(), vmProfile.getUuid(), plan.getClusterId())); + logger.debug("Cannot find new storage pool to deploy volume [{}] of VM [{}] in cluster [{}] because allocation state is disabled. Returning.", + toBeCreated, vmProfile, plan.getClusterId()); suitableVolumeStoragePools.clear(); break; } @@ -1789,13 +1781,13 @@ StateListener, Configurable { Boolean useLocalStorageForSystemVM = ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(zone.getId()); if (useLocalStorageForSystemVM != null) { useLocalStorage = useLocalStorageForSystemVM.booleanValue(); - logger.debug("System VMs will use " + (useLocalStorage ? 
"local" : "shared") + " storage for zone id=" + plan.getDataCenterId()); + logger.debug("System VMs will use {} storage for zone {}", useLocalStorage ? "local" : "shared", zone); } } else { useLocalStorage = diskOffering.isUseLocalStorage(); } diskProfile.setUseLocalStorage(useLocalStorage); - logger.debug("Calling StoragePoolAllocators to find suitable pools to allocate volume [{}] necessary to deploy VM [{}].", toBeCreated.getUuid(), vmProfile.getUuid()); + logger.debug("Calling StoragePoolAllocators to find suitable pools to allocate volume [{}] necessary to deploy VM [{}].", toBeCreated, vmProfile); boolean foundPotentialPools = tryToFindPotentialPoolsToAlocateVolume(vmProfile, plan, avoid, returnUpTo, suitableVolumeStoragePools, toBeCreated, diskProfile); if (avoid.getPoolsToAvoid() != null) { @@ -1804,7 +1796,7 @@ StateListener, Configurable { } if (!foundPotentialPools) { - logger.debug(String.format("No suitable pools found for volume [{}] used by VM [{}] under cluster: [{}].", toBeCreated.getUuid(), vmProfile.getUuid(), plan.getClusterId())); + logger.debug("No suitable pools found for volume [{}] used by VM [{}] under cluster: [{}].", toBeCreated, vmProfile, plan.getClusterId()); // No suitable storage pools found under this cluster for this // volume. - remove any suitable pools found for other volumes. 
// All volumes should get suitable pools under this cluster; @@ -1837,12 +1829,12 @@ StateListener, Configurable { Map> suitableVolumeStoragePools, VolumeVO toBeCreated, DiskProfile diskProfile) { for (StoragePoolAllocator allocator : _storagePoolAllocators) { logger.debug("Trying to find suitable pools to allocate volume [{}] necessary to deploy VM [{}], using StoragePoolAllocator: [{}].", - toBeCreated.getUuid(), vmProfile.getUuid(), allocator.getClass().getSimpleName()); + toBeCreated, vmProfile, allocator.getClass().getSimpleName()); final List suitablePools = allocator.allocateToPool(diskProfile, vmProfile, plan, avoid, returnUpTo); if (suitablePools != null && !suitablePools.isEmpty()) { logger.debug("StoragePoolAllocator [{}] found {} suitable pools to allocate volume [{}] necessary to deploy VM [{}].", - allocator.getClass().getSimpleName(), suitablePools.size(), toBeCreated.getUuid(), vmProfile.getUuid()); + allocator.getClass().getSimpleName(), suitablePools.size(), toBeCreated, vmProfile); checkForPreferredStoragePool(suitablePools, vmProfile.getVirtualMachine(), suitableVolumeStoragePools, toBeCreated); return true; } @@ -1853,7 +1845,6 @@ StateListener, Configurable { private boolean checkIfPoolCanBeReused(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, Map> suitableVolumeStoragePools, List readyAndReusedVolumes, VolumeVO toBeCreated) { - logger.debug("Volume [{}] of VM [{}] has pool [{}] already specified. Checking if this pool can be reused.", toBeCreated.getUuid(), vmProfile.getUuid(), toBeCreated.getPoolId()); List suitablePools = new ArrayList<>(); StoragePool pool = null; if (toBeCreated.getPoolId() != null) { @@ -1862,16 +1853,18 @@ StateListener, Configurable { pool = (StoragePool)dataStoreMgr.getPrimaryDataStore(plan.getPoolId()); } + logger.debug("Volume [{}] of VM [{}] has pool [{}] already specified. 
Checking if this pool can be reused.", toBeCreated, vmProfile, pool); + if (!pool.isInMaintenance()) { if (!avoid.shouldAvoid(pool)) { return canReusePool(vmProfile, plan, suitableVolumeStoragePools, readyAndReusedVolumes, toBeCreated, suitablePools, pool); } else { logger.debug("Pool [{}] of volume [{}] used by VM [{}] is in the avoid set. Need to reallocate a pool for this volume.", - pool.getUuid(), toBeCreated.getUuid(), vmProfile.getUuid()); + pool, toBeCreated, vmProfile); } } else { logger.debug("Pool [{}] of volume [{}] used by VM [{}] is in maintenance. Need to reallocate a pool for this volume.", - pool.getUuid(), toBeCreated.getUuid(), vmProfile.getUuid()); + pool, toBeCreated, vmProfile); } return false; } @@ -1888,7 +1881,7 @@ StateListener, Configurable { if (plan.getDataCenterId() == exstPoolDcId && ((plan.getPodId() == exstPoolPodId && plan.getClusterId() == exstPoolClusterId) || (dataStore != null && dataStore.getScope() != null && dataStore.getScope().getScopeType() == ScopeType.ZONE))) { logger.debug("Pool [{}] of volume [{}] used by VM [{}] fits the specified plan. No need to reallocate a pool for this volume.", - pool.getUuid(), toBeCreated.getUuid(), vmProfile.getUuid()); + pool, toBeCreated, vmProfile); suitablePools.add(pool); suitableVolumeStoragePools.put(toBeCreated, suitablePools); if (!(toBeCreated.getState() == Volume.State.Allocated || toBeCreated.getState() == Volume.State.Creating)) { @@ -1898,7 +1891,7 @@ StateListener, Configurable { } logger.debug("Pool [{}] of volume [{}] used by VM [{}] does not fit the specified plan. 
Need to reallocate a pool for this volume.", - pool.getUuid(), toBeCreated.getUuid(), vmProfile.getUuid()); + pool, toBeCreated, vmProfile); return false; } @@ -1928,13 +1921,11 @@ StateListener, Configurable { Optional storagePool = getMatchingStoragePool(accountStoragePoolUuid, poolList); if (storagePool.isPresent()) { - logger.debug("A storage pool is specified for this account, so we will use this storage pool for allocation: " - + storagePool.get().getUuid()); + logger.debug("A storage pool is specified for this account, so we will use this storage pool for allocation: {}", storagePool.get()); } else { String globalStoragePoolUuid = StorageManager.PreferredStoragePool.value(); storagePool = getMatchingStoragePool(globalStoragePoolUuid, poolList); - storagePool.ifPresent(pool -> logger.debug("A storage pool is specified in global setting, so we will use this storage pool for allocation: " - + pool.getUuid())); + storagePool.ifPresent(pool -> logger.debug("A storage pool is specified in global setting, so we will use this storage pool for allocation: {}", pool)); } return storagePool; } @@ -1943,19 +1934,19 @@ StateListener, Configurable { // Check if the zone exists in the system DataCenterVO zone = _dcDao.findById(zoneId); if (zone != null && Grouping.AllocationState.Disabled == zone.getAllocationState()) { - logger.info("Zone is currently disabled, cannot allocate to this zone: " + zoneId); + logger.info("Zone is currently disabled, cannot allocate to this zone: {}", zone); return false; } Pod pod = _podDao.findById(podId); if (pod != null && Grouping.AllocationState.Disabled == pod.getAllocationState()) { - logger.info("Pod is currently disabled, cannot allocate to this pod: " + podId); + logger.info("Pod is currently disabled, cannot allocate to this pod: {}", pod); return false; } Cluster cluster = _clusterDao.findById(clusterId); if (cluster != null && Grouping.AllocationState.Disabled == cluster.getAllocationState()) { - logger.info("Cluster is currently 
disabled, cannot allocate to this cluster: " + clusterId); + logger.info("Cluster is currently disabled, cannot allocate to this cluster: {}", cluster); return false; } diff --git a/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java b/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java index 46e6c369c33..abaf48400e2 100644 --- a/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java +++ b/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java @@ -133,7 +133,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla //check if datacenter is in avoid set if (avoid.shouldAvoid(dc)) { if (logger.isDebugEnabled()) { - logger.debug("DataCenter id = '" + dc.getId() + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning."); + logger.debug("DataCenter {} provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning.", dc); } return null; } @@ -141,8 +141,8 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla List clusterList = new ArrayList<>(); if (plan.getClusterId() != null) { Long clusterIdSpecified = plan.getClusterId(); - logger.debug("Searching resources only under specified Cluster: " + clusterIdSpecified); ClusterVO cluster = clusterDao.findById(plan.getClusterId()); + logger.debug("Searching resources only under specified Cluster: {}", cluster != null ? cluster : clusterIdSpecified); if (cluster != null) { if (avoid.shouldAvoid(cluster)) { logger.debug("The specified cluster is in avoid set, returning."); @@ -158,9 +158,9 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla } else if (plan.getPodId() != null) { //consider clusters under this pod only Long podIdSpecified = plan.getPodId(); - logger.debug("Searching resources only under specified Pod: " + podIdSpecified); HostPodVO pod = podDao.findById(podIdSpecified); + logger.debug("Searching resources only under specified Pod: {}", pod != null ? 
pod : podIdSpecified); if (pod != null) { if (avoid.shouldAvoid(pod)) { logger.debug("The specified pod is in avoid set, returning."); @@ -176,7 +176,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla return null; } } else { - logger.debug("Searching all possible resources under this Zone: " + plan.getDataCenterId()); + logger.debug("Searching all possible resources under this Zone: {}", dcDao.findById(plan.getDataCenterId())); boolean applyAllocationAtPods = Boolean.parseBoolean(configDao.getValue(Config.ApplyAllocationAlgorithmToPods.key())); if (applyAllocationAtPods) { diff --git a/server/src/main/java/com/cloud/ha/AbstractInvestigatorImpl.java b/server/src/main/java/com/cloud/ha/AbstractInvestigatorImpl.java index b65865e732b..4ee6adeab60 100644 --- a/server/src/main/java/com/cloud/ha/AbstractInvestigatorImpl.java +++ b/server/src/main/java/com/cloud/ha/AbstractInvestigatorImpl.java @@ -64,23 +64,22 @@ public abstract class AbstractInvestigatorImpl extends AdapterBase implements In } // Host.status is up and Host.type is routing - protected List findHostByPod(long podId, Long excludeHostId) { + protected List findHostByPod(long podId, Long excludeHostId) { QueryBuilder sc = QueryBuilder.create(HostVO.class); sc.and(sc.entity().getType(), Op.EQ, Type.Routing); sc.and(sc.entity().getPodId(), Op.EQ, podId); sc.and(sc.entity().getStatus(), Op.EQ, Status.Up); List hosts = sc.list(); - List hostIds = new ArrayList(hosts.size()); - for (HostVO h : hosts) { - hostIds.add(h.getId()); + List hostList = new ArrayList<>(hosts.size()); + for (HostVO host : hosts) { + if (excludeHostId != null && host.getId() == excludeHostId) { + continue; + } + hostList.add(host); } - if (excludeHostId != null) { - hostIds.remove(excludeHostId); - } - - return hostIds; + return hostList; } // Method only returns Status.Up, Status.Down and Status.Unknown diff --git a/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java 
b/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java index 20435f48b52..e10bd47a067 100644 --- a/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java +++ b/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java @@ -256,7 +256,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur hostState = investigator.isAgentAlive(host); if (hostState != null) { if (logger.isDebugEnabled()) { - logger.debug(investigator.getName() + " was able to determine host " + hostId + " is in " + hostState.toString()); + logger.debug("{} was able to determine host {} is in {}", investigator.getName(), host, hostState.toString()); } return hostState; } @@ -276,12 +276,12 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur } if (host.getHypervisorType() == HypervisorType.VMware || host.getHypervisorType() == HypervisorType.Hyperv) { - logger.info("Don't restart VMs on host " + host.getId() + " as it is a " + host.getHypervisorType().toString() + " host"); + logger.info("Don't restart VMs on host {} as it is a {} host", host, host.getHypervisorType().toString()); return; } if (!VmHaEnabled.valueIn(host.getDataCenterId())) { - String message = String.format("Unable to schedule restart for VMs on host %s (%d), VM high availability manager is disabled.", host.getName(), host.getId()); + String message = String.format("Unable to schedule restart for VMs on host %s, VM high availability manager is disabled.", host); if (logger.isDebugEnabled()) { logger.debug(message); } @@ -289,7 +289,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur return; } - logger.warn("Scheduling restart for VMs on host " + host.getId() + "-" + host.getName()); + logger.warn("Scheduling restart for VMs on host {}", host); final List vms = _instanceDao.listByHostId(host.getId()); final DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId()); @@ -329,13 +329,12 @@ public class 
HighAvailabilityManagerImpl extends ManagerBase implements Configur continue; } if (logger.isDebugEnabled()) { - logger.debug("Notifying HA Mgr of to restart vm " + vm.getId() + "-" + vm.getInstanceName()); + logger.debug("Notifying HA Mgr of to restart vm {}", vm); } vm = _instanceDao.findByUuid(vm.getUuid()); Long hostId = vm.getHostId(); if (hostId != null && !hostId.equals(host.getId())) { - logger.debug("VM " + vm.getInstanceName() + " is not on down host " + host.getId() + " it is on other host " - + hostId + " VM HA is done"); + logger.debug("VM {} is not on down host {} it is on other host {} VM HA is done", vm, host, hostId); continue; } scheduleRestart(vm, investigate); @@ -383,7 +382,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur } if (!VmHaEnabled.valueIn(vm.getDataCenterId())) { - String message = String.format("Unable to schedule migration for the VM %s (%d) on host %d, VM high availability manager is disabled.", vm.getName(), vm.getId(), vm.getHostId()); + String message = String.format("Unable to schedule migration for the VM %s on host %s, VM high availability manager is disabled.", vm, _hostDao.findById(vm.getHostId())); if (logger.isDebugEnabled()) { logger.debug(message); } @@ -393,7 +392,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur final HaWorkVO work = new HaWorkVO(vm.getId(), vm.getType(), WorkType.Migration, Step.Scheduled, vm.getHostId(), vm.getState(), 0, vm.getUpdated()); _haDao.persist(work); - logger.info("Scheduled migration work of VM " + vm.getUuid() + " from host " + _hostDao.findById(vm.getHostId()) + " with HAWork " + work); + logger.info("Scheduled migration work of VM {} from host {} with HAWork {}", vm, _hostDao.findById(vm.getHostId()), work); wakeupWorkers(); return true; } @@ -584,20 +583,20 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur if (host == null) { host = 
_hostDao.findByIdIncludingRemoved(work.getHostId()); if (host != null) { - logger.debug("VM " + vm.toString() + " is now no longer on host " + work.getHostId() + " as the host is removed"); + logger.debug("VM {} is now no longer on host {} as the host is removed", vm, host); isHostRemoved = true; } } DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId()); HostPodVO podVO = _podDao.findById(host.getPodId()); - String hostDesc = "name: " + host.getName() + "(id:" + host.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName(); + String hostDesc = String.format("%s, availability zone: %s, pod: %s", host, dcVO.getName(), podVO.getName()); Boolean alive = null; if (work.getStep() == Step.Investigating) { if (!isHostRemoved) { if (vm.getHostId() == null || vm.getHostId() != work.getHostId()) { - logger.info("VM " + vm.toString() + " is now no longer on host " + work.getHostId()); + logger.info("VM {} is now no longer on host {}", vm, host); return null; } @@ -629,7 +628,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur } else if (!alive) { fenced = true; } else { - logger.debug("VM " + vm.getInstanceName() + " is found to be alive by " + investigator.getName()); + logger.debug("VM {} is found to be alive by {}", vm, investigator.getName()); if (host.getStatus() == Status.Up) { logger.info(vm + " is alive and host is up. No need to restart it."); return null; @@ -724,7 +723,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur // First try starting the vm with its original planner, if it doesn't succeed send HAPlanner as its an emergency. 
startVm(vm, params, null); } catch (InsufficientCapacityException e){ - logger.warn("Failed to deploy vm " + vmId + " with original planner, sending HAPlanner"); + logger.warn("Failed to deploy vm {} with original planner, sending HAPlanner", vm); startVm(vm, params, _haPlanners.get(0)); } @@ -743,19 +742,19 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur } catch (final InsufficientCapacityException e) { logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); _alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), "Unable to restart " + vm.getHostName() + " which was running on host " + - hostDesc, "Insufficient capacity to restart VM, name: " + vm.getHostName() + ", id: " + vmId + " which was running on host " + hostDesc); + hostDesc, String.format("Insufficient capacity to restart VM, name: %s, id: %d uuid: %s which was running on host %s", vm.getHostName(), vmId, vm.getUuid(), hostDesc)); } catch (final ResourceUnavailableException e) { logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); _alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), "Unable to restart " + vm.getHostName() + " which was running on host " + - hostDesc, "The resource is unavailable for trying to restart VM, name: " + vm.getHostName() + ", id: " + vmId + " which was running on host " + hostDesc); + hostDesc, String.format("The resource is unavailable for trying to restart VM, name: %s, id: %d uuid: %s which was running on host %s", vm.getHostName(), vmId, vm.getUuid(), hostDesc)); } catch (ConcurrentOperationException e) { logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); _alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), "Unable to restart " + vm.getHostName() + " which was running on host " + - hostDesc, "The Storage is unavailable for trying to restart VM, name: " + vm.getHostName() + ", id: " + 
vmId + " which was running on host " + hostDesc); + hostDesc, String.format("The Storage is unavailable for trying to restart VM, name: %s, id: %d uuid: %s which was running on host %s", vm.getHostName(), vmId, vm.getUuid(), hostDesc)); } catch (OperationTimedoutException e) { logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); _alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), "Unable to restart " + vm.getHostName() + " which was running on host " + - hostDesc, "The operation timed out while trying to restart VM, name: " + vm.getHostName() + ", id: " + vmId + " which was running on host " + hostDesc); + hostDesc, String.format("The operation timed out while trying to restart VM, name: %s, id: %d uuid: %s which was running on host %s", vm.getHostName(), vmId, vm.getUuid(), hostDesc)); } vm = _itMgr.findById(vm.getId()); work.setUpdateTime(vm.getUpdated()); @@ -766,14 +765,14 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur public Long migrate(final HaWorkVO work) { long vmId = work.getInstanceId(); long srcHostId = work.getHostId(); + HostVO srcHost = _hostDao.findById(srcHostId); VMInstanceVO vm = _instanceDao.findById(vmId); if (vm == null) { logger.info("Unable to find vm: " + vmId + ", skipping migrate."); return null; } - logger.info("Migration attempt: for VM " + vm.getUuid() + "from host id " + srcHostId + - ". Starting attempt: " + (1 + work.getTimesTried()) + "/" + _maxRetries + " times."); + logger.info("Migration attempt: for VM {}from host {}. 
Starting attempt: {}/{} times.", vm, srcHost, 1 + work.getTimesTried(), _maxRetries); try { work.setStep(Step.Migrating); _haDao.update(work.getId(), work); @@ -782,14 +781,11 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur _itMgr.migrateAway(vm.getUuid(), srcHostId); return null; } catch (InsufficientServerCapacityException e) { - logger.warn("Migration attempt: Insufficient capacity for migrating a VM " + - vm.getUuid() + " from source host id " + srcHostId + - ". Exception: " + e.getMessage()); + logger.warn("Migration attempt: Insufficient capacity for migrating a VM {} from source host {}. Exception: {}", vm, srcHost, e.getMessage()); _resourceMgr.migrateAwayFailed(srcHostId, vmId); return (System.currentTimeMillis() >> 10) + _migrateRetryInterval; } catch (Exception e) { - logger.warn("Migration attempt: Unexpected exception occurred when attempting migration of " + - vm.getUuid() + e.getMessage()); + logger.warn("Migration attempt: Unexpected exception occurred when attempting migration of {} {}", vm, e.getMessage()); throw e; } } @@ -845,7 +841,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur boolean expunge = VirtualMachine.Type.SecondaryStorageVm.equals(vm.getType()) || VirtualMachine.Type.ConsoleProxy.equals(vm.getType()); if (!expunge && VirtualMachine.State.Destroyed.equals(work.getPreviousState())) { - logger.info("VM " + vm.getUuid() + " already in " + vm.getState() + " state. Throwing away " + work); + logger.info("VM {} already in {} state. 
Throwing away {}", vm, vm.getState(), work); return null; } try { @@ -854,7 +850,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur destroyVM(vm, expunge); return null; } else { - logger.info("VM " + vm.getUuid() + " still in " + vm.getState() + " state."); + logger.info("VM {} still in {} state.", vm, vm.getState()); } } catch (final AgentUnavailableException e) { logger.debug("Agent is not available" + e.getMessage()); @@ -885,8 +881,9 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur } else if (work.getWorkType() == WorkType.CheckStop) { if ((vm.getState() != work.getPreviousState()) || vm.getUpdated() != work.getUpdateTime() || vm.getHostId() == null || vm.getHostId().longValue() != work.getHostId()) { - logger.info(vm + " is different now. Scheduled Host: " + work.getHostId() + " Current Host: " + - (vm.getHostId() != null ? vm.getHostId() : "none") + " State: " + vm.getState()); + HostVO scheduledHost = _hostDao.findById(work.getHostId()); + HostVO currentHost = vm.getHostId() != null ? _hostDao.findById(vm.getHostId()) : null; + logger.info("{} is different now. Scheduled Host: {} Current Host: {} State: {}", vm, scheduledHost, currentHost != null ? currentHost : "none", vm.getState()); return null; } @@ -896,8 +893,9 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur } else if (work.getWorkType() == WorkType.ForceStop) { if ((vm.getState() != work.getPreviousState()) || vm.getUpdated() != work.getUpdateTime() || vm.getHostId() == null || vm.getHostId().longValue() != work.getHostId()) { - logger.info(vm + " is different now. Scheduled Host: " + work.getHostId() + " Current Host: " + - (vm.getHostId() != null ? vm.getHostId() : "none") + " State: " + vm.getState()); + HostVO scheduledHost = _hostDao.findById(work.getHostId()); + HostVO currentHost = vm.getHostId() != null ? _hostDao.findById(vm.getHostId()) : null; + logger.info("{} is different now. 
Scheduled Host: {} Current Host: {} State: {}", vm, scheduledHost, currentHost != null ? currentHost : "none", vm.getState()); return null; } @@ -919,7 +917,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur @Override public void cancelScheduledMigrations(final HostVO host) { WorkType type = host.getType() == HostVO.Type.Storage ? WorkType.Stop : WorkType.Migration; - logger.info("Canceling all scheduled migrations from host " + host.getUuid()); + logger.info("Canceling all scheduled migrations from host {}", host); _haDao.deleteMigrationWorkItems(host.getId(), type, _serverId); } diff --git a/server/src/main/java/com/cloud/ha/ManagementIPSystemVMInvestigator.java b/server/src/main/java/com/cloud/ha/ManagementIPSystemVMInvestigator.java index ce45d662082..0972f2451af 100644 --- a/server/src/main/java/com/cloud/ha/ManagementIPSystemVMInvestigator.java +++ b/server/src/main/java/com/cloud/ha/ManagementIPSystemVMInvestigator.java @@ -71,9 +71,9 @@ public class ManagementIPSystemVMInvestigator extends AbstractInvestigatorImpl { continue; } // get the data center IP address, find a host on the pod, use that host to ping the data center IP address - List otherHosts = findHostByPod(vmHost.getPodId(), vm.getHostId()); - for (Long otherHost : otherHosts) { - Status vmState = testIpAddress(otherHost, nic.getIPv4Address()); + List otherHosts = findHostByPod(vmHost.getPodId(), vm.getHostId()); + for (HostVO otherHost : otherHosts) { + Status vmState = testIpAddress(otherHost.getId(), nic.getIPv4Address()); assert vmState != null; // In case of Status.Unknown, next host will be tried if (vmState == Status.Up) { @@ -84,7 +84,7 @@ public class ManagementIPSystemVMInvestigator extends AbstractInvestigatorImpl { } else if (vmState == Status.Down) { // We can't ping the VM directly...if we can ping the host, then report the VM down. // If we can't ping the host, then we don't have enough information. 
- Status vmHostState = testIpAddress(otherHost, vmHost.getPrivateIpAddress()); + Status vmHostState = testIpAddress(otherHost.getId(), vmHost.getPrivateIpAddress()); assert vmHostState != null; if (vmHostState == Status.Up) { if (logger.isDebugEnabled()) { diff --git a/server/src/main/java/com/cloud/ha/UserVmDomRInvestigator.java b/server/src/main/java/com/cloud/ha/UserVmDomRInvestigator.java index 90d34799d3d..7d063b3088e 100644 --- a/server/src/main/java/com/cloud/ha/UserVmDomRInvestigator.java +++ b/server/src/main/java/com/cloud/ha/UserVmDomRInvestigator.java @@ -18,6 +18,7 @@ package com.cloud.ha; import java.util.ArrayList; import java.util.List; +import java.util.stream.Collectors; import javax.inject.Inject; @@ -26,6 +27,7 @@ import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.PingTestCommand; import com.cloud.host.Host; +import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.network.NetworkModel; @@ -73,7 +75,8 @@ public class UserVmDomRInvestigator extends AbstractInvestigatorImpl { List routers = _vnaMgr.getRoutersForNetwork(nic.getNetworkId()); if (routers == null || routers.isEmpty()) { if (logger.isDebugEnabled()) { - logger.debug("Unable to find a router in network " + nic.getNetworkId() + " to ping " + vm); + logger.debug("Unable to find a router in network {} to ping {}", + _networkMgr.getNetwork(nic.getNetworkId()), vm); } continue; } @@ -102,26 +105,26 @@ public class UserVmDomRInvestigator extends AbstractInvestigatorImpl { @Override public Status isAgentAlive(Host agent) { if (logger.isDebugEnabled()) { - logger.debug("checking if agent (" + agent.getId() + ") is alive"); + logger.debug("checking if agent ({}) is alive", agent); } if (agent.getPodId() == null) { return null; } - List otherHosts = findHostByPod(agent.getPodId(), agent.getId()); + List otherHosts = findHostByPod(agent.getPodId(), agent.getId()); - for (Long 
hostId : otherHosts) { + for (HostVO host : otherHosts) { if (logger.isDebugEnabled()) { - logger.debug("sending ping from (" + hostId + ") to agent's host ip address (" + agent.getPrivateIpAddress() + ")"); + logger.debug("sending ping from ({}) to agent's host ip address ({})", host, agent.getPrivateIpAddress()); } - Status hostState = testIpAddress(hostId, agent.getPrivateIpAddress()); + Status hostState = testIpAddress(host.getId(), agent.getPrivateIpAddress()); assert hostState != null; // In case of Status.Unknown, next host will be tried if (hostState == Status.Up) { if (logger.isDebugEnabled()) { - logger.debug("ping from (" + hostId + ") to agent's host ip address (" + agent.getPrivateIpAddress() + - ") successful, returning that agent is disconnected"); + logger.debug("ping from ({}) to agent's host ip address ({}) successful, returning that agent is disconnected", + host, agent.getPrivateIpAddress()); } return Status.Disconnected; // the computing host ip is ping-able, but the computing agent is down, report that the agent is disconnected } else if (hostState == Status.Down) { @@ -157,15 +160,17 @@ public class UserVmDomRInvestigator extends AbstractInvestigatorImpl { if (vm.getHypervisorType() == HypervisorType.XenServer || vm.getHypervisorType() == HypervisorType.KVM) { otherHosts.add(router.getHostId()); } else { - otherHosts = findHostByPod(router.getPodIdToDeployIn(), null); + List otherHostsList = findHostByPod(router.getPodIdToDeployIn(), null); + otherHosts = otherHostsList.stream().map(HostVO::getId).collect(Collectors.toList()); } for (Long hostId : otherHosts) { try { Answer pingTestAnswer = _agentMgr.easySend(hostId, new PingTestCommand(routerPrivateIp, privateIp)); if (pingTestAnswer != null && pingTestAnswer.getResult()) { if (logger.isDebugEnabled()) { - logger.debug("user vm's " + vm.getHostName() + " ip address " + privateIp + " has been successfully pinged from the Virtual Router " + - router.getHostName() + ", returning that vm is 
alive"); + logger.debug("user vm's {} ip address {} has been successfully " + + "pinged from the Virtual Router {}, returning that vm is alive", + vm, privateIp, router); } return Boolean.TRUE; } diff --git a/server/src/main/java/com/cloud/hypervisor/CloudZonesStartupProcessor.java b/server/src/main/java/com/cloud/hypervisor/CloudZonesStartupProcessor.java index 961e11e91d7..f2704f35fcd 100644 --- a/server/src/main/java/com/cloud/hypervisor/CloudZonesStartupProcessor.java +++ b/server/src/main/java/com/cloud/hypervisor/CloudZonesStartupProcessor.java @@ -131,13 +131,13 @@ public class CloudZonesStartupProcessor extends AdapterBase implements StartupCo } } if (logger.isDebugEnabled()) { - logger.debug("Successfully loaded the DataCenter from the zone token passed in "); + logger.debug("Successfully loaded the DataCenter {} from the zone token passed in ", zone); } HostPodVO pod = findPod(startup, zone.getId(), Host.Type.Routing); //yes, routing Long podId = null; if (pod != null) { - logger.debug("Found pod " + pod.getName() + " for the secondary storage host " + startup.getName()); + logger.debug("Found pod {} for the secondary storage host {}", pod, startup.getName()); podId = pod.getId(); } host.setDataCenterId(zone.getId()); diff --git a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java index a5b2a3b75a5..c510502f5f9 100644 --- a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java +++ b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java @@ -206,7 +206,7 @@ public abstract class HypervisorGuruBase extends AdapterBase implements Hypervis } to.setNicSecIps(secIps); } else { - logger.warn("Unabled to load NicVO for NicProfile " + profile.getId()); + logger.warn("Unable to load NicVO for NicProfile {}", profile); //Workaround for dynamically created nics //FixMe: uuid and secondary IPs can be made part of nic profile
to.setUuid(UUID.randomUUID().toString()); diff --git a/server/src/main/java/com/cloud/hypervisor/KVMGuru.java b/server/src/main/java/com/cloud/hypervisor/KVMGuru.java index c27adc59fde..9edaa5e6d64 100644 --- a/server/src/main/java/com/cloud/hypervisor/KVMGuru.java +++ b/server/src/main/java/com/cloud/hypervisor/KVMGuru.java @@ -134,21 +134,22 @@ public class KVMGuru extends HypervisorGuruBase implements HypervisorGuru { if (host == null) { throw new CloudRuntimeException("Host with id: " + vm.getHostId() + " not found"); } - logger.debug("Limiting CPU usage for VM: " + vm.getUuid() + " on host: " + host.getUuid()); + logger.debug("Limiting CPU usage for VM: {} on host: {}", vm, host); double hostMaxSpeed = getHostCPUSpeed(host); double maxSpeed = getVmSpeed(to); try { BigDecimal percent = new BigDecimal(maxSpeed / hostMaxSpeed); percent = percent.setScale(2, RoundingMode.HALF_DOWN); if (percent.compareTo(new BigDecimal(1)) == 1) { - logger.debug("VM " + vm.getUuid() + " CPU MHz exceeded host " + host.getUuid() + " CPU MHz, limiting VM CPU to the host maximum"); + logger.debug("VM {} CPU MHz exceeded host {} CPU MHz, limiting VM CPU to the host maximum", vm, host); percent = new BigDecimal(1); } to.setCpuQuotaPercentage(percent.doubleValue()); - logger.debug("Host: " + host.getUuid() + " max CPU speed = " + hostMaxSpeed + "MHz, VM: " + vm.getUuid() + - "max CPU speed = " + maxSpeed + "MHz. Setting CPU quota percentage as: " + percent.doubleValue()); + logger.debug("Host: {} max CPU speed = {} MHz, VM: {} max CPU speed = {} MHz. " + + "Setting CPU quota percentage as: {}", + host, hostMaxSpeed, vm, maxSpeed, percent.doubleValue()); } catch (NumberFormatException e) { - logger.error("Error calculating VM: " + vm.getUuid() + " quota percentage, it wll not be set. Error: " + e.getMessage(), e); + logger.error("Error calculating VM: {} quota percentage, it will not be set. 
Error: {}", vm, e.getMessage(), e); } } } @@ -241,9 +242,11 @@ public class KVMGuru extends HypervisorGuruBase implements HypervisorGuru { } Long lastHostId = virtualMachine.getLastHostId(); - logger.info(String.format("%s is not running; therefore, we use the last host [%s] that the VM was running on to derive the unconstrained service offering max CPU and memory.", vmDescription, lastHostId)); HostVO lastHost = lastHostId == null ? null : hostDao.findById(lastHostId); + logger.info("{} is not running; therefore, we use the last host [{}] with id {} that the " + + "VM was running on to derive the unconstrained service offering max CPU " + + "and memory.", vmDescription, lastHost, lastHostId); if (lastHost != null) { maxHostMemory = lastHost.getTotalMemory(); maxHostCpuCore = lastHost.getCpus(); diff --git a/server/src/main/java/com/cloud/hypervisor/kvm/dpdk/DpdkHelperImpl.java b/server/src/main/java/com/cloud/hypervisor/kvm/dpdk/DpdkHelperImpl.java index 361b1302b28..a4afdb394c1 100644 --- a/server/src/main/java/com/cloud/hypervisor/kvm/dpdk/DpdkHelperImpl.java +++ b/server/src/main/java/com/cloud/hypervisor/kvm/dpdk/DpdkHelperImpl.java @@ -75,10 +75,9 @@ public class DpdkHelperImpl implements DpdkHelper { VHostUserMode dpdKvHostUserMode = VHostUserMode.fromValue(mode); to.addExtraConfig(DPDK_VHOST_USER_MODE, dpdKvHostUserMode.toString()); } catch (IllegalArgumentException e) { - logger.error(String.format("DPDK vHost User mode found as a detail for service offering: %s " + - "but value: %s is not supported. Supported values: %s, %s", - offering.getId(), mode, - VHostUserMode.CLIENT.toString(), VHostUserMode.SERVER.toString())); + logger.error("DPDK vHost User mode found as a detail for service offering: {} " + + "but value: {} is not supported. 
Supported values: {}, {}", + offering, mode, VHostUserMode.CLIENT.toString(), VHostUserMode.SERVER.toString()); } } } diff --git a/server/src/main/java/com/cloud/network/ExternalDeviceUsageManagerImpl.java b/server/src/main/java/com/cloud/network/ExternalDeviceUsageManagerImpl.java index 329e4b90379..bffc35f72df 100644 --- a/server/src/main/java/com/cloud/network/ExternalDeviceUsageManagerImpl.java +++ b/server/src/main/java/com/cloud/network/ExternalDeviceUsageManagerImpl.java @@ -240,7 +240,7 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter lbAnswer = (ExternalNetworkResourceUsageAnswer)_agentMgr.easySend(externalLoadBalancer.getId(), cmd); if (lbAnswer == null || !lbAnswer.getResult()) { String details = (lbAnswer != null) ? lbAnswer.getDetails() : "details unavailable"; - String msg = "Unable to get external load balancer stats for network" + networkId + " due to: " + details + "."; + String msg = String.format("Unable to get external load balancer stats for network %s due to: %s.", network, details); logger.error(msg); return; } @@ -413,7 +413,7 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter for (NetworkVO network : networksForAccount) { if (!_networkModel.networkIsConfiguredForExternalNetworking(zoneId, network.getId())) { - logger.debug("Network " + network.getId() + " is not configured for external networking, so skipping usage check."); + logger.debug("Network {} is not configured for external networking, so skipping usage check.", network); continue; } @@ -456,7 +456,7 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter } } else { if (logger.isTraceEnabled()) { - logger.trace("Reusing usage Answer for device id " + fwDeviceId + "for Network " + network.getId()); + logger.trace("Reusing usage Answer for device id {} for Network {}", fwDeviceId, network); } firewallAnswer = fwDeviceUsageAnswerMap.get(fwDeviceId); } @@ -491,7 +491,7 @@ public class 
ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter } } else { if (logger.isTraceEnabled()) { - logger.trace("Reusing usage Answer for device id " + lbDeviceId + "for Network " + network.getId()); + logger.trace("Reusing usage Answer for device id {} for Network {}", lbDeviceId, network); } lbAnswer = lbDeviceUsageAnswerMap.get(lbDeviceId); } @@ -598,7 +598,7 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter } else { URI broadcastURI = network.getBroadcastUri(); if (broadcastURI == null) { - logger.debug("Not updating stats for guest network with ID " + network.getId() + " because the network is not implemented."); + logger.debug("Not updating stats for guest network {} because the network is not implemented.", network); return true; } else { long vlanTag = Integer.parseInt(BroadcastDomainType.getValue(broadcastURI)); diff --git a/server/src/main/java/com/cloud/network/ExternalFirewallDeviceManagerImpl.java b/server/src/main/java/com/cloud/network/ExternalFirewallDeviceManagerImpl.java index 924a3b75dad..ca108749f01 100644 --- a/server/src/main/java/com/cloud/network/ExternalFirewallDeviceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/ExternalFirewallDeviceManagerImpl.java @@ -206,11 +206,9 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl final PhysicalNetworkServiceProviderVO ntwkSvcProvider = _physicalNetworkServiceProviderDao.findByServiceProvider(pNetwork.getId(), ntwkDevice.getNetworkServiceProvder()); if (ntwkSvcProvider == null) { - throw new CloudRuntimeException("Network Service Provider: " + ntwkDevice.getNetworkServiceProvder() + " is not enabled in the physical network: " + - physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is not enabled in the physical network: %s to add this device", ntwkDevice.getNetworkServiceProvder(), pNetwork)); } else if (ntwkSvcProvider.getState() == 
PhysicalNetworkServiceProvider.State.Shutdown) { - throw new CloudRuntimeException("Network Service Provider: " + ntwkSvcProvider.getProviderName() + - " is not added or in shutdown state in the physical network: " + physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is not added or in shutdown state in the physical network: %s to add this device", ntwkSvcProvider.getProviderName(), pNetwork)); } URI uri; @@ -386,7 +384,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl _networkExternalFirewallDao.remove(fwDeviceForNetwork.getId()); } } catch (Exception exception) { - logger.error("Failed to release firewall device for the network" + network.getId() + " due to " + exception.getMessage()); + logger.error("Failed to release firewall device for the network {} due to {}", network, exception.getMessage()); return false; } finally { deviceMapLock.unlock(); @@ -551,8 +549,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl } String action = add ? "implemented" : "shut down"; - logger.debug("External firewall has " + action + " the guest network for account " + account.getAccountName() + "(id = " + account.getAccountId() + - ") with VLAN tag " + guestVlanTag); + logger.debug("External firewall has {} the guest network for account {} with VLAN tag {}", action, account, guestVlanTag); return true; } @@ -572,8 +569,8 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl assert (externalFirewall != null); if (network.getState() == Network.State.Allocated) { - logger.debug("External firewall was asked to apply firewall rules for network with ID " + network.getId() + - "; this network is not implemented. Skipping backend commands."); + logger.debug("External firewall was asked to apply firewall rules for network {}; " + + "this network is not implemented. 
Skipping backend commands.", network); return true; } @@ -615,8 +612,8 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl assert (externalFirewall != null); if (network.getState() == Network.State.Allocated) { - logger.debug("External firewall was asked to apply firewall rules for network with ID " + network.getId() + - "; this network is not implemented. Skipping backend commands."); + logger.debug("External firewall was asked to apply firewall rules for network {}; " + + "this network is not implemented. Skipping backend commands.", network); return true; } @@ -761,7 +758,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl } if (pNetwork.getVnet() == null) { - throw new CloudRuntimeException("Could not find vlan range for physical Network " + physicalNetworkId + "."); + throw new CloudRuntimeException("Could not find vlan range for physical Network " + pNetwork + "."); } Integer lowestVlanTag = null; List> vnetList = pNetwork.getVnet(); @@ -820,8 +817,8 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl assert (externalFirewall != null); if (network.getState() == Network.State.Allocated) { - logger.debug("External firewall was asked to apply firewall rules for network with ID " + network.getId() + - "; this network is not implemented. Skipping backend commands."); + logger.debug("External firewall was asked to apply firewall rules for network {}; " + + "this network is not implemented. 
Skipping backend commands.", network); return true; } diff --git a/server/src/main/java/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java b/server/src/main/java/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java index 4298463fe6d..1ded4ecedc6 100644 --- a/server/src/main/java/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java @@ -244,11 +244,9 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase ntwkSvcProvider = _physicalNetworkServiceProviderDao.findByServiceProvider(pNetwork.getId(), ntwkDevice.getNetworkServiceProvder()); if (ntwkSvcProvider == null) { - throw new CloudRuntimeException("Network Service Provider: " + ntwkDevice.getNetworkServiceProvder() + " is not enabled in the physical network: " + - physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is not enabled in the physical network: %s to add this device", ntwkDevice.getNetworkServiceProvder(), pNetwork)); } else if (ntwkSvcProvider.getState() == PhysicalNetworkServiceProvider.State.Shutdown) { - throw new CloudRuntimeException("Network Service Provider: " + ntwkSvcProvider.getProviderName() + " is in shutdown state in the physical network: " + - physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is in shutdown state in the physical network: %s to add this device", ntwkSvcProvider.getProviderName(), pNetwork)); } if (gslbProvider) { @@ -518,12 +516,11 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase try { createLbAnswer = (CreateLoadBalancerApplianceAnswer)_agentMgr.easySend(lbProviderDevice.getHostId(), lbProvisionCmd); if (createLbAnswer == null || !createLbAnswer.getResult()) { - logger.error("Could not provision load balancer instance on the load balancer device " + 
lbProviderDevice.getId()); + logger.error("Could not provision load balancer instance on the load balancer device {}", lbProviderDevice); continue; } } catch (Exception agentException) { - logger.error("Could not provision load balancer instance on the load balancer device " + lbProviderDevice.getId() + " due to " + - agentException.getMessage()); + logger.error("Could not provision load balancer instance on the load balancer device {} due to {}", lbProviderDevice, agentException.getMessage()); continue; } @@ -581,10 +578,10 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase } else { // release the public & private IP back to dc pool, as the load balancer appliance is now destroyed _dcDao.releasePrivateIpAddress(lbIP, guestConfig.getDataCenterId(), null); - _ipAddrMgr.disassociatePublicIpAddress(publicIp.getId(), _accountMgr.getSystemUser().getId(), _accountMgr.getSystemAccount()); + _ipAddrMgr.disassociatePublicIpAddress(publicIp, _accountMgr.getSystemUser().getId(), _accountMgr.getSystemAccount()); } } catch (Exception e) { - logger.warn("Failed to destroy load balancer appliance created for the network" + guestConfig.getId() + " due to " + e.getMessage()); + logger.warn("Failed to destroy load balancer appliance created for the network {} due to {}", guestConfig, e.getMessage()); } } } @@ -719,16 +716,16 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase try { answer = (DestroyLoadBalancerApplianceAnswer)_agentMgr.easySend(lbDevice.getParentHostId(), lbDeleteCmd); if (answer == null) { - logger.warn(String.format("Failed to destroy load balancer appliance used by the network [%s] due to a communication error with agent.", guestConfig.getId())); + logger.warn("Failed to destroy load balancer appliance used by the network [{}] due to a communication error with agent.", guestConfig); } else if (!answer.getResult()) { - logger.warn(String.format("Failed to destroy load balancer appliance used by the 
network [%s] due to [%s].", guestConfig.getId(), answer.getDetails())); + logger.warn("Failed to destroy load balancer appliance used by the network [{}] due to [{}].", guestConfig, answer.getDetails()); } } catch (Exception e) { - logger.warn("Failed to destroy load balancer appliance used by the network" + guestConfig.getId() + " due to " + e.getMessage()); + logger.warn("Failed to destroy load balancer appliance used by the network {} due to {}", guestConfig, e.getMessage()); } if (logger.isDebugEnabled()) { - logger.debug("Successfully destroyed load balancer appliance used for the network" + guestConfig.getId()); + logger.debug("Successfully destroyed load balancer appliance used for the network {}", guestConfig); } deviceMapLock.unlock(); @@ -741,18 +738,18 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase // release the public IP allocated for this LB appliance DetailVO publicIpDetail = _hostDetailDao.findDetail(lbHost.getId(), "publicip"); IPAddressVO ipVo = _ipAddressDao.findByIpAndDcId(guestConfig.getDataCenterId(), publicIpDetail.toString()); - _ipAddrMgr.disassociatePublicIpAddress(ipVo.getId(), _accountMgr.getSystemUser().getId(), _accountMgr.getSystemAccount()); + _ipAddrMgr.disassociatePublicIpAddress(ipVo, _accountMgr.getSystemUser().getId(), _accountMgr.getSystemAccount()); } else { deviceMapLock.unlock(); } return true; } else { - logger.error("Failed to release load balancer device for the network" + guestConfig.getId() + "as failed to acquire lock "); + logger.error("Failed to release load balancer device for the network {} as failed to acquire lock ", guestConfig); return false; } } catch (Exception exception) { - logger.error("Failed to release load balancer device for the network" + guestConfig.getId() + " due to " + exception.getMessage()); + logger.error("Failed to release load balancer device for the network {} due to {}", guestConfig, exception.getMessage()); } finally { deviceMapLock.releaseRef(); } @@ 
-931,8 +928,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase boolean externalLoadBalancerIsInline = _networkMgr.isNetworkInlineMode(network); if (network.getState() == Network.State.Allocated) { - logger.debug("External load balancer was asked to apply LB rules for network with ID " + network.getId() + - "; this network is not implemented. Skipping backend commands."); + logger.debug("External load balancer was asked to apply LB rules for network {}; this network is not implemented. Skipping backend commands.", network); return true; } @@ -1048,13 +1044,13 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase // allocate a load balancer device for the network lbDeviceVO = allocateLoadBalancerForNetwork(guestConfig); if (lbDeviceVO == null) { - String msg = "failed to alloacate a external load balancer for the network " + guestConfig.getId(); + String msg = String.format("failed to allocate an external load balancer for the network %s", guestConfig); logger.error(msg); throw new InsufficientNetworkCapacityException(msg, DataCenter.class, guestConfig.getDataCenterId()); } } externalLoadBalancer = _hostDao.findById(lbDeviceVO.getHostId()); - logger.debug("Allocated external load balancer device:" + lbDeviceVO.getId() + " for the network: " + guestConfig.getId()); + logger.debug("Allocated external load balancer device: {} for the network: {}", lbDeviceVO, guestConfig); } else { // find the load balancer device allocated for the network ExternalLoadBalancerDeviceVO lbDeviceVO = getExternalLoadBalancerForNetwork(guestConfig); @@ -1128,7 +1124,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase // release the load balancer allocated for the network boolean releasedLB = freeLoadBalancerForNetwork(guestConfig); if (!releasedLB) { - String msg = "Failed to release the external load balancer used for the network: " + guestConfig.getId(); + String msg = String.format("Failed to 
release the external load balancer used for the network: %s", guestConfig); logger.error(msg); } } @@ -1136,8 +1132,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase if (logger.isDebugEnabled()) { Account account = _accountDao.findByIdIncludingRemoved(guestConfig.getAccountId()); String action = add ? "implemented" : "shut down"; - logger.debug("External load balancer has " + action + " the guest network for account " + account.getAccountName() + "(id = " + account.getAccountId() + - ") with VLAN tag " + guestVlanTag); + logger.debug("External load balancer has {} the guest network for account {} with VLAN tag {}", action, account, guestVlanTag); } return true; @@ -1194,20 +1189,20 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase List providers = _networkMgr.getProvidersForServiceInNetwork(network, Service.Firewall); //Only support one provider now if (providers == null) { - logger.error("Cannot find firewall provider for network " + network.getId()); + logger.error("Cannot find firewall provider for network {}", network); return null; } if (providers.size() != 1) { - logger.error("Found " + providers.size() + " firewall provider for network " + network.getId()); + logger.error("Found {} firewall provider for network {}", providers.size(), network); return null; } NetworkElement element = _networkModel.getElementImplementingProvider(providers.get(0).getName()); if (!(element instanceof IpDeployer)) { - logger.error("The firewall provider for network " + network.getName() + " don't have ability to deploy IP address!"); + logger.error("The firewall provider for network {} doesn't have the ability to deploy IP address!", network); return null; } - logger.info("Let " + element.getName() + " handle ip association for " + getName() + " in network " + network.getId()); + logger.info("Let {} handle ip association for {} in network {}", element.getName(), getName(), network); return (IpDeployer)element; } @@ 
-1239,8 +1234,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase boolean externalLoadBalancerIsInline = _networkMgr.isNetworkInlineMode(network); if (network.getState() == Network.State.Allocated) { - logger.debug("External load balancer was asked to apply LB rules for network with ID " + network.getId() + - "; this network is not implemented. Skipping backend commands."); + logger.debug("External load balancer was asked to apply LB rules for network {}; this network is not implemented. Skipping backend commands.", network); return null; } diff --git a/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java b/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java index e6be174abcd..1fa416b643b 100644 --- a/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java +++ b/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java @@ -551,13 +551,13 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage private IpAddress allocateIP(Account ipOwner, boolean isSystem, long zoneId) throws InsufficientAddressCapacityException, ConcurrentOperationException { Account caller = CallContext.current().getCallingAccount(); - long callerUserId = CallContext.current().getCallingUserId(); + User callerUser = CallContext.current().getCallingUser(); // check permissions _accountMgr.checkAccess(caller, null, false, ipOwner); DataCenter zone = _entityMgr.findById(DataCenter.class, zoneId); - return allocateIp(ipOwner, isSystem, caller, callerUserId, zone, null, null); + return allocateIp(ipOwner, isSystem, caller, callerUser, zone, null, null); } // An IP association is required in below cases @@ -670,36 +670,37 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage return success; } - protected boolean cleanupIpResources(long ipId, long userId, Account caller) { + protected boolean cleanupIpResources(IpAddress ip, long userId, Account caller) { boolean success = 
true; + long ipId = ip.getId(); // Revoke all firewall rules for the ip try { - logger.debug("Revoking all " + Purpose.Firewall + "rules as a part of public IP id=" + ipId + " release..."); - if (!_firewallMgr.revokeFirewallRulesForIp(ipId, userId, caller)) { - logger.warn("Unable to revoke all the firewall rules for ip id=" + ipId + " as a part of ip release"); + logger.debug("Revoking all {} rules as a part of public IP {} release...", Purpose.Firewall, ip); + if (!_firewallMgr.revokeFirewallRulesForIp(ip, userId, caller)) { + logger.warn("Unable to revoke all the firewall rules for ip {} as a part of ip release", ip); success = false; } } catch (ResourceUnavailableException e) { - logger.warn("Unable to revoke all firewall rules for ip id=" + ipId + " as a part of ip release", e); + logger.warn("Unable to revoke all firewall rules for ip {} as a part of ip release", ip, e); success = false; } // Revoke all PF/Static nat rules for the ip try { - logger.debug("Revoking all " + Purpose.PortForwarding + "/" + Purpose.StaticNat + " rules as a part of public IP id=" + ipId + " release..."); + logger.debug("Revoking all {}/{} rules as a part of public IP {} release...", Purpose.PortForwarding, Purpose.StaticNat, ip); if (!_rulesMgr.revokeAllPFAndStaticNatRulesForIp(ipId, userId, caller)) { - logger.warn("Unable to revoke all the port forwarding rules for ip id=" + ipId + " as a part of ip release"); + logger.warn("Unable to revoke all the port forwarding rules for ip {} as a part of ip release", ip); success = false; } } catch (ResourceUnavailableException e) { - logger.warn("Unable to revoke all the port forwarding rules for ip id=" + ipId + " as a part of ip release", e); + logger.warn("Unable to revoke all the port forwarding rules for ip {} as a part of ip release", ip, e); success = false; } - logger.debug("Revoking all " + Purpose.LoadBalancing + " rules as a part of public IP id=" + ipId + " release..."); + logger.debug("Revoking all {} rules as a part of public 
IP {} release...", Purpose.LoadBalancing, ip); if (!_lbMgr.removeAllLoadBalanacersForIp(ipId, caller, userId)) { - logger.warn("Unable to revoke all the load balancer rules for ip id=" + ipId + " as a part of ip release"); + logger.warn("Unable to revoke all the load balancer rules for ip {} as a part of ip release", ip); success = false; } @@ -707,11 +708,11 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage // conditions // only when ip address failed to be cleaned up as a part of account destroy and was marked as Releasing, this part of // the code would be triggered - logger.debug("Cleaning up remote access vpns as a part of public IP id=" + ipId + " release..."); + logger.debug("Cleaning up remote access vpns as a part of public IP {} release...", ip); try { _vpnMgr.destroyRemoteAccessVpnForIp(ipId, caller,false); } catch (ResourceUnavailableException e) { - logger.warn("Unable to destroy remote access vpn for ip id=" + ipId + " as a part of ip release", e); + logger.warn("Unable to destroy remote access vpn for ip {} as a part of ip release", ip, e); success = false; } @@ -720,8 +721,9 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage @Override @DB - public boolean disassociatePublicIpAddress(long addrId, long userId, Account caller) { + public boolean disassociatePublicIpAddress(IpAddress ipAddress, long userId, Account caller) { boolean success = true; + long addrId = ipAddress.getId(); try { IPAddressVO ipToBeDisassociated = _ipAddressDao.acquireInLockTable(addrId); @@ -733,9 +735,9 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage PublicIpQuarantine publicIpQuarantine = null; // Cleanup all ip address resources - PF/LB/Static nat rules - if (!cleanupIpResources(addrId, userId, caller)) { + if (!cleanupIpResources(ipAddress, userId, caller)) { success = false; - logger.warn("Failed to release resources for ip address id=" + addrId); + logger.warn("Failed to 
release resources for ip address {}", ipAddress); } IPAddressVO ip = markIpAsUnavailable(addrId); @@ -744,7 +746,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage } if (logger.isDebugEnabled()) { - logger.debug("Releasing ip id=" + addrId + "; sourceNat = " + ip.isSourceNat()); + logger.debug("Releasing ip {}; sourceNat = {}", ip, ip.isSourceNat()); } if (ip.getAssociatedWithNetworkId() != null) { @@ -768,7 +770,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage if (ip.isPortable()) { releasePortableIpAddress(addrId); } - logger.debug("Released a public ip id=" + addrId); + logger.debug("Released a public ip {}", ip); } else if (publicIpQuarantine != null) { removePublicIpAddressFromQuarantine(publicIpQuarantine.getId(), "Public IP address removed from quarantine as there was an error while disassociating it."); } @@ -1061,11 +1063,11 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage } } } else { - logger.error("Failed to mark public IP as allocated with id=" + addr.getId() + " address=" + addr.getAddress()); + logger.error("Failed to mark public IP as allocated: {}", addr); } } } else { - logger.error("Failed to acquire row lock to mark public IP as allocated with id=" + addr.getId() + " address=" + addr.getAddress()); + logger.error("Failed to acquire row lock to mark public IP as allocated: {}", addr); } } }); @@ -1118,7 +1120,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage throw ex; } if (logger.isDebugEnabled()) { - logger.debug("lock account " + ownerId + " is acquired"); + logger.debug("lock account {} is acquired", owner); } List vlanDbIds = null; boolean displayIp = true; @@ -1139,19 +1141,19 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage } }); if (ip.getState() != State.Allocated) { - logger.error("Failed to fetch new IP and allocate it for ip with id=" + ip.getId() + ", 
address=" + ip.getAddress()); + logger.error("Failed to fetch new IP and allocate it for ip: {}", ip); } return ip; } finally { if (owner != null) { if (logger.isDebugEnabled()) { - logger.debug("Releasing lock account " + ownerId); + logger.debug("Releasing lock account {}", owner); } _accountDao.releaseFromLockTable(ownerId); } if (ip == null) { - logger.error("Unable to get source nat ip address for account " + ownerId); + logger.error("Unable to get source nat ip address for account {}", owner); } } } @@ -1170,13 +1172,13 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage markPublicIpAsAllocated(addr); } else if (addr.getState() == IpAddress.State.Releasing) { // Cleanup all the resources for ip address if there are any, and only then un-assign ip in the system - if (cleanupIpResources(addr.getId(), Account.ACCOUNT_ID_SYSTEM, _accountMgr.getSystemAccount())) { + if (cleanupIpResources(addr, Account.ACCOUNT_ID_SYSTEM, _accountMgr.getSystemAccount())) { addPublicIpAddressToQuarantine(addr, network.getDomainId()); _ipAddressDao.unassignIpAddress(addr.getId()); messageBus.publish(_name, MESSAGE_RELEASE_IPADDR_EVENT, PublishScope.LOCAL, addr); } else { success = false; - logger.warn("Failed to release resources for ip address id=" + addr.getId()); + logger.warn("Failed to release resources for ip address: {}", addr); } } } @@ -1285,8 +1287,8 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage } if (ipVO.getTakenAt() == null) { - logger.debug("Ip Address with id= " + id + " is not allocated, so do nothing."); - throw new CloudRuntimeException("Ip Address with id= " + id + " is not allocated, so do nothing."); + logger.debug("Ip Address {} is not allocated, so do nothing.", ipVO); + throw new CloudRuntimeException(String.format("Ip Address %s is not allocated, so do nothing.", ipVO)); } // Verify permission DataCenter zone = _entityMgr.findById(DataCenter.class, ipVO.getDataCenterId()); @@ -1303,7 
+1305,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage @DB @Override - public IpAddress allocateIp(final Account ipOwner, final boolean isSystem, Account caller, long callerUserId, final DataCenter zone, final Boolean displayIp, final String ipaddress) + public IpAddress allocateIp(final Account ipOwner, final boolean isSystem, Account caller, User callerUser, final DataCenter zone, final Boolean displayIp, final String ipaddress) throws ConcurrentOperationException, InsufficientAddressCapacityException, CloudRuntimeException { final VlanType vlanType = VlanType.VirtualNetwork; @@ -1321,11 +1323,11 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage Account accountToLock = null; try { if (logger.isDebugEnabled()) { - logger.debug("Associate IP address called by the user " + callerUserId + " account " + ipOwner.getId()); + logger.debug(String.format("Associate IP address called by the user %s account %s", callerUser, ipOwner)); } accountToLock = _accountDao.acquireInLockTable(ipOwner.getId()); if (accountToLock == null) { - logger.warn("Unable to lock account: " + ipOwner.getId()); + logger.warn("Unable to lock account: {}", ipOwner); throw new ConcurrentOperationException("Unable to acquire account lock"); } @@ -1355,7 +1357,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage CallContext.current().setEventDetails("Ip Id: " + ip.getId()); Ip ipAddress = ip.getAddress(); - logger.debug("Got " + ipAddress + " to assign for account " + ipOwner.getId() + " in zone " + zone.getId()); + logger.debug("Got {} to assign for account {} in zone {}", ipAddress, ipOwner, zone); return ip; } @@ -1504,7 +1506,8 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage } if (ipToAssoc.getAssociatedWithNetworkId() != null) { - logger.debug("IP " + ipToAssoc + " is already associated with network id=" + networkId); + logger.debug("IP {} is already 
associated with network {}", + ipToAssoc::toString, () -> _networksDao.findById(ipToAssoc.getAssociatedWithNetworkId())); return ipToAssoc; } @@ -1666,7 +1669,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage } if (ipToAssoc.getAssociatedWithNetworkId() != network.getId()) { - throw new InvalidParameterValueException("IP " + ipToAssoc + " is not associated with network id" + networkId); + throw new InvalidParameterValueException(String.format("IP %s is not associated with network: %s", ipToAssoc, network)); } DataCenter zone = _entityMgr.findById(DataCenter.class, network.getDataCenterId()); @@ -1697,7 +1700,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage if (!ipToServices.isEmpty()) { Set services = ipToServices.get(publicIp); if (services != null && !services.isEmpty()) { - throw new InvalidParameterValueException("IP " + ipToAssoc + " has services and rules associated in the network " + networkId); + throw new InvalidParameterValueException(String.format("IP %s has services and rules associated in the network %s", ipToAssoc, network)); } } @@ -1769,7 +1772,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage // disassociate portable IP with current network/VPC network if (srcNetwork.getVpcId() != null) { - _vpcMgr.unassignIPFromVpcNetwork(ipAddrId, currentNetworkId); + _vpcMgr.unassignIPFromVpcNetwork(ip, srcNetwork); } else { disassociatePortableIPToGuestNetwork(ipAddrId, currentNetworkId); } @@ -1830,12 +1833,12 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage @DB public boolean associateIpAddressListToAccount(long userId, final long accountId, final long zoneId, final Long vlanId, final Network guestNetworkFinal) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, ResourceAllocationException { - final Account owner = _accountMgr.getActiveAccountById(accountId); - if 
(guestNetworkFinal != null && guestNetworkFinal.getTrafficType() != TrafficType.Guest) { throw new InvalidParameterValueException("Network " + guestNetworkFinal + " is not of a type " + TrafficType.Guest); } + final Account owner = _accountMgr.getActiveAccountById(accountId); + DataCenter zone = _dcDao.findById(zoneId); Ternary, Network> pair = null; try { pair = Transaction.execute(new TransactionCallbackWithException, Network>, Exception>() { @@ -1873,19 +1876,23 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage + requiredOfferings.get(0).getTags()); } - logger.debug("Creating network for account " + owner + " from the network offering id=" + requiredOfferings.get(0).getId() - + " as a part of createVlanIpRange process"); + logger.debug("Creating network for account {} from the network" + + " offering [{}] as a part of createVlanIpRange process", owner, requiredOfferings.get(0)); guestNetwork = _networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), owner.getAccountName() + "-network", owner.getAccountName() + "-network", null, null, null, false, null, owner, null, physicalNetwork, zoneId, ACLType.Account, null, null, null, null, true, null, null, null, null, null, null, null, null, null, null, null); if (guestNetwork == null) { - logger.warn("Failed to create default Virtual network for the account " + accountId + "in zone " + zoneId); - throw new CloudRuntimeException("Failed to create a Guest Isolated Networks with SourceNAT " - + "service enabled as a part of createVlanIpRange, for the account " + accountId + "in zone " + zoneId); + logger.warn("Failed to create default Virtual network for the account {} in zone {}", owner, zone); + throw new CloudRuntimeException(String.format("Failed to create a" + + " Guest Isolated Networks with SourceNAT service enabled " + + "as a part of createVlanIpRange, for the account %s in " + + "zone %s", owner, zone)); } } else { - throw new CloudRuntimeException("Required network 
offering id=" + requiredOfferings.get(0).getId() + " is not in " + NetworkOffering.State.Enabled); + throw new CloudRuntimeException(String.format( + "Required network offering [%s] is not in %s state", + requiredOfferings.get(0), NetworkOffering.State.Enabled)); } } @@ -1933,7 +1940,6 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage // if the network offering has persistent set to true, implement the network if (createNetwork && requiredOfferings.get(0).isPersistent()) { - DataCenter zone = _dcDao.findById(zoneId); DeployDestination dest = new DeployDestination(zone, null, null, null); Account callerAccount = CallContext.current().getCallingAccount(); UserVO callerUser = _userDao.findById(CallContext.current().getCallingUserId()); @@ -1965,7 +1971,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage final IPAddressVO ip = _ipAddressDao.findById(addrId); if (ip.getAllocatedToAccountId() == null && ip.getAllocatedTime() == null) { - logger.trace("Ip address id=" + addrId + " is already released"); + logger.trace("Ip address: {} is already released", ip); return ip; } @@ -2250,11 +2256,11 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage if (networkId != null) { if (ip.getSystem()) { CallContext ctx = CallContext.current(); - if (!disassociatePublicIpAddress(ip.getId(), ctx.getCallingUserId(), ctx.getCallingAccount())) { - logger.warn("Unable to release system ip address id=" + ip.getId()); + if (!disassociatePublicIpAddress(ip, ctx.getCallingUserId(), ctx.getCallingAccount())) { + logger.warn("Unable to release system ip address: {}", ip); success = false; } else { - logger.warn("Successfully released system ip address id=" + ip.getId()); + logger.warn("Successfully released system ip address: {}", ip); } } } @@ -2330,7 +2336,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage //Get ip address from the placeholder and don't allocate 
a new one if (requestedIpv4 != null && vm.getType() == VirtualMachine.Type.DomainRouter) { - logger.debug("There won't be nic assignment for VR id " + vm.getId() + " in this network " + network); + logger.debug("There won't be nic assignment for VR {} in this network {}", vm, network); } @@ -2421,7 +2427,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage networkId = ip.getSourceNetworkId(); } if (networkId == null) { - throw new CloudRuntimeException("No network for IP " + ip.getId()); + throw new CloudRuntimeException(String.format("No network for IP %s", ip)); } NetworkDetailVO networkDetail = _networkDetailsDao.findDetail(networkId, Network.hideIpAddressUsage); return networkDetail != null && "true".equals(networkDetail.getValue()); diff --git a/server/src/main/java/com/cloud/network/Ipv6AddressManagerImpl.java b/server/src/main/java/com/cloud/network/Ipv6AddressManagerImpl.java index 4cee7423cbf..d4c293ecf88 100644 --- a/server/src/main/java/com/cloud/network/Ipv6AddressManagerImpl.java +++ b/server/src/main/java/com/cloud/network/Ipv6AddressManagerImpl.java @@ -105,7 +105,7 @@ public class Ipv6AddressManagerImpl extends ManagerBase implements Ipv6AddressMa public String acquireGuestIpv6Address(Network network, String requestedIpv6) throws InsufficientAddressCapacityException { if (!_networkModel.areThereIPv6AddressAvailableInNetwork(network.getId())) { throw new InsufficientAddressCapacityException( - String.format("There is no IPv6 address available in the network [name=%s, network id=%s]", network.getName(), network.getId()), DataCenter.class, + String.format("There is no IPv6 address available in the network [name=%s, id=%s, uuid=%s]", network.getName(), network.getId(), network.getUuid()), DataCenter.class, network.getDataCenterId()); } @@ -123,7 +123,7 @@ public class Ipv6AddressManagerImpl extends ManagerBase implements Ipv6AddressMa if (ip != null) { State ipState = ip.getState(); if (ipState != State.Free) { - throw 
new InsufficientAddressCapacityException(String.format("Requested ip address [%s] is not free [ip state=%]", requestedIpv6, ipState), DataCenter.class, + throw new InsufficientAddressCapacityException(String.format("Requested ip address [%s] is not free [ip state=%s]", requestedIpv6, ipState), DataCenter.class, network.getDataCenterId()); } } @@ -158,19 +158,22 @@ public class Ipv6AddressManagerImpl extends ManagerBase implements Ipv6AddressMa protected void checkIfCanAllocateIpv6Address(Network network, String ipv6) throws InsufficientAddressCapacityException { if (isIp6Taken(network, ipv6)) { throw new InsufficientAddressCapacityException( - String.format("The IPv6 address [%s] is already in use in the network [id=%s, name=%s]", ipv6, network.getId(), network.getName()), Network.class, + String.format("The IPv6 address [%s] is already in use in the network [id=%s, uuid=%s, name=%s]", + ipv6, network.getId(), network.getUuid(), network.getName()), Network.class, network.getId()); } if (ipAddressManager.isIpEqualsGatewayOrNetworkOfferingsEmpty(network, ipv6)) { throw new InvalidParameterValueException( - String.format("The network [id=%s] offering is empty or the requested IP address [%s] is equals to the Gateway", network.getId(), ipv6)); + String.format("The network [id=%s, uuid=%s, name=%s] offering is empty or the requested IP address [%s] is equals to the Gateway", + network.getId(), network.getUuid(), network.getName(), ipv6)); } String networkIp6Cidr = network.getIp6Cidr(); if (!NetUtils.isIp6InNetwork(ipv6, networkIp6Cidr)) { throw new InvalidParameterValueException( - String.format("The IPv6 address [%s] is not in the network [id=%s, name=%s, ipv6cidr=%s]", ipv6, network.getId(), network.getName(), network.getIp6Cidr())); + String.format("The IPv6 address [%s] is not in the network [id=%s, uuid=%s name=%s, ipv6cidr=%s]", + ipv6, network.getId(), network.getUuid(), network.getName(), network.getIp6Cidr())); } } @@ -210,7 +213,7 @@ public class 
Ipv6AddressManagerImpl extends ManagerBase implements Ipv6AddressMa setNicPropertiesFromNetwork(nic, network); IPv6Address ipv6addr = NetUtils.EUI64Address(network.getIp6Cidr(), nic.getMacAddress()); - logger.info("Calculated IPv6 address " + ipv6addr + " using EUI-64 for NIC " + nic.getUuid()); + logger.info("Calculated IPv6 address {} using EUI-64 for NIC {}", ipv6addr, nic); nic.setIPv6Address(ipv6addr.toString()); if (nic.getIPv4Address() != null) { diff --git a/server/src/main/java/com/cloud/network/Ipv6ServiceImpl.java b/server/src/main/java/com/cloud/network/Ipv6ServiceImpl.java index d5b3cab44a6..464f8c90ebb 100644 --- a/server/src/main/java/com/cloud/network/Ipv6ServiceImpl.java +++ b/server/src/main/java/com/cloud/network/Ipv6ServiceImpl.java @@ -35,6 +35,9 @@ import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.network.dao.PhysicalNetworkDao; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.command.admin.network.CreateGuestNetworkIpv6PrefixCmd; import org.apache.cloudstack.api.command.admin.network.DeleteGuestNetworkIpv6PrefixCmd; @@ -53,7 +56,6 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; -import com.cloud.api.ApiDBUtils; import com.cloud.configuration.Resource; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterGuestIpv6Prefix; @@ -118,6 +120,8 @@ public class Ipv6ServiceImpl extends ComponentLifecycleBase implements Ipv6Servi @Inject VlanDao vlanDao; @Inject + DataCenterDao zoneDao; + @Inject DataCenterGuestIpv6PrefixDao dataCenterGuestIpv6PrefixDao; @Inject Ipv6GuestPrefixSubnetNetworkMapDao ipv6GuestPrefixSubnetNetworkMapDao; @@ -130,6 +134,8 @@ public class Ipv6ServiceImpl extends ComponentLifecycleBase implements Ipv6Servi @Inject NicDao nicDao; 
@Inject + PhysicalNetworkDao physicalNetworkDao; + @Inject DomainRouterDao domainRouterDao; @Inject AccountManager accountManager; @@ -159,8 +165,8 @@ public class Ipv6ServiceImpl extends ComponentLifecycleBase implements Ipv6Servi NicVO nic = nicOptional.get(); Optional vlanOptional = ranges.stream().filter(v -> nic.getIPv6Cidr().equals(v.getIp6Cidr()) && nic.getIPv6Gateway().equals(v.getIp6Gateway())).findFirst(); if (vlanOptional.isEmpty()) { - logger.error(String.format("Public IPv6 placeholder NIC with cidr: %s, gateway: %s for network ID: %d is not present in the allocated VLAN: %s", - nic.getIPv6Cidr(), nic.getIPv6Gateway(),network.getId(), ranges.get(0).getVlanTag())); + logger.error("Public IPv6 placeholder NIC {} with cidr: {}, gateway: {} for network: {} is not present in the allocated VLAN: {}", + nic, nic.getIPv6Cidr(), nic.getIPv6Gateway(), network, ranges.get(0).getVlanTag()); return null; } return new Pair<>(nic.getIPv6Address(), vlanOptional.get()); @@ -205,9 +211,11 @@ public class Ipv6ServiceImpl extends ComponentLifecycleBase implements Ipv6Servi private Pair assignPublicIpv6ToNetworkInternal(Network network, String vlanId, String nicMacAddress) throws InsufficientAddressCapacityException { final List ranges = vlanDao.listIpv6RangeByZoneIdAndVlanId(network.getDataCenterId(), vlanId); if (CollectionUtils.isEmpty(ranges)) { - logger.error(String.format("Unable to find IPv6 address for zone ID: %d, physical network ID: %d, VLAN: %s", network.getDataCenterId(), network.getPhysicalNetworkId(), vlanId)); + DataCenterVO zone = zoneDao.findById(network.getDataCenterId()); + logger.error("Unable to find IPv6 address for zone: {}, physical network: {}, VLAN: {}", + zone, physicalNetworkDao.findById(network.getPhysicalNetworkId()), vlanId); InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException("Insufficient address capacity", DataCenter.class, network.getDataCenterId()); - 
ex.addProxyObject(ApiDBUtils.findZoneById(network.getDataCenterId()).getUuid()); + ex.addProxyObject(zone.getUuid()); throw ex; } Pair placeholderResult = getPublicIpv6FromNetworkPlaceholder(network, ranges); @@ -330,11 +338,11 @@ public class Ipv6ServiceImpl extends ComponentLifecycleBase implements Ipv6Servi return new Pair<>(used, total); } - public Pair preAllocateIpv6SubnetForNetwork(long zoneId) throws ResourceAllocationException { + public Pair preAllocateIpv6SubnetForNetwork(DataCenter zone) throws ResourceAllocationException { return Transaction.execute((TransactionCallbackWithException, ResourceAllocationException>) status -> { - List prefixes = dataCenterGuestIpv6PrefixDao.listByDataCenterId(zoneId); + List prefixes = dataCenterGuestIpv6PrefixDao.listByDataCenterId(zone.getId()); if (CollectionUtils.isEmpty(prefixes)) { - logger.error(String.format("IPv6 prefixes not found for the zone ID: %d", zoneId)); + logger.error("IPv6 prefixes not found for the zone: {}", zone); throw new ResourceAllocationException("Unable to allocate IPv6 network", Resource.ResourceType.network); } Ipv6GuestPrefixSubnetNetworkMapVO ip6Subnet = null; @@ -492,7 +500,7 @@ public class Ipv6ServiceImpl extends ComponentLifecycleBase implements Ipv6Servi public void checkNetworkIpv6Upgrade(Network network) throws InsufficientAddressCapacityException, ResourceAllocationException { List prefixes = dataCenterGuestIpv6PrefixDao.listByDataCenterId(network.getDataCenterId()); if (CollectionUtils.isEmpty(prefixes)) { - logger.error(String.format("IPv6 prefixes not found for the zone ID: %d", network.getDataCenterId())); + logger.error("IPv6 prefixes not found for the zone: {}", zoneDao.findById(network.getDataCenterId())); throw new ResourceAllocationException("Unable to allocate IPv6 network", Resource.ResourceType.network); } List addresses = network.getVpcId() == null ? 
@@ -502,9 +510,11 @@ public class Ipv6ServiceImpl extends ComponentLifecycleBase implements Ipv6Servi VlanVO vlan = vlanDao.findById(address.getVlanId()); final List ranges = vlanDao.listIpv6RangeByZoneIdAndVlanId(network.getDataCenterId(), vlan.getVlanTag()); if (CollectionUtils.isEmpty(ranges)) { - logger.error(String.format("Unable to find IPv6 address for zone ID: %d, physical network ID: %d, VLAN: %s", network.getDataCenterId(), network.getPhysicalNetworkId(), vlan.getVlanTag())); + DataCenterVO zone = zoneDao.findById(network.getDataCenterId()); + logger.error("Unable to find IPv6 address for zone: {}, physical network: {}, VLAN: {}", + zone, physicalNetworkDao.findById(network.getPhysicalNetworkId()), vlan.getVlanTag()); InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException("Insufficient address capacity", DataCenter.class, network.getDataCenterId()); - ex.addProxyObject(ApiDBUtils.findZoneById(network.getDataCenterId()).getUuid()); + ex.addProxyObject(zone.getUuid()); throw ex; } } @@ -576,7 +586,7 @@ public class Ipv6ServiceImpl extends ComponentLifecycleBase implements Ipv6Servi if (!supportedProtocols.contains(protocol.toLowerCase())) { throw new InvalidParameterValueException(String.format("Protocol %s is not supported in zone", protocol)); } else if (!supportedTrafficTypes.contains(trafficType.toString().toLowerCase())) { - throw new InvalidParameterValueException("Traffic Type " + trafficType + " is currently supported by Firewall in network " + networkId); + throw new InvalidParameterValueException(String.format("Traffic Type %s is not currently supported by Firewall in network %s", trafficType, network)); } } @@ -653,9 +663,9 @@ public class Ipv6ServiceImpl extends ComponentLifecycleBase implements Ipv6Servi return false; } if (!FirewallRule.Purpose.Ipv6Firewall.equals(rule.getPurpose())) { - logger.error(String.format("Cannot apply IPv6 firewall rule with ID: %d as purpose %s is not %s", id, rule.getPurpose(), 
FirewallRule.Purpose.Ipv6Firewall)); + logger.error("Cannot apply IPv6 firewall rule: {} as purpose {} is not {}", rule, rule.getPurpose(), FirewallRule.Purpose.Ipv6Firewall); } - logger.debug(String.format("Applying IPv6 firewall rules for rule with ID: %s", rule.getUuid())); + logger.debug("Applying IPv6 firewall rules for rule: {}", rule); List rules = firewallDao.listByNetworkPurposeTrafficType(rule.getNetworkId(), rule.getPurpose(), FirewallRule.TrafficType.Egress); rules.addAll(firewallDao.listByNetworkPurposeTrafficType(rule.getNetworkId(), FirewallRule.Purpose.Ipv6Firewall, FirewallRule.TrafficType.Ingress)); return firewallManager.applyFirewallRules(rules, false, CallContext.current().getCallingAccount()); diff --git a/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java b/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java index 39546dc9061..91e4fddb69c 100644 --- a/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java @@ -183,7 +183,7 @@ public class NetworkMigrationManagerImpl implements NetworkMigrationManager { @Override public long makeCopyOfNetwork(Network network, NetworkOffering networkOffering, Long vpcId) { if (logger.isDebugEnabled()) { - logger.debug("Making a copy of network with uuid " + network.getUuid() + " and id " + network.getId() + " for migration."); + logger.debug("Making a copy of network {} for migration.", network); } long originalNetworkId = network.getId(); NetworkDomainVO domainNetworkMapByNetworkId = _networkDomainDao.getDomainNetworkMapByNetworkId(originalNetworkId); @@ -235,7 +235,7 @@ public class NetworkMigrationManagerImpl implements NetworkMigrationManager { _networksDao.update(networkCopyId, copiedNetwork); copyNetworkDetails(originalNetworkId, networkCopyId); - copyFirewallRulesToNewNetwork(network, networkCopyId); + copyFirewallRulesToNewNetwork(network, copiedNetwork); 
assignUserNicsToNewNetwork(originalNetworkId, networkCopyId); assignRouterNicsToNewNetwork(network.getId(), networkCopyId); @@ -287,7 +287,7 @@ public class NetworkMigrationManagerImpl implements NetworkMigrationManager { public Long makeCopyOfVpc(long vpcId, long vpcOfferingId) { VpcVO vpc = _vpcDao.findById(vpcId); if (logger.isDebugEnabled()) { - logger.debug("Making a copy of vpc with uuid " + vpc.getUuid() + " and id " + vpc.getId() + " for migration."); + logger.debug("Making a copy of vpc {} for migration.", vpc); } if (vpc == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Specified vpc id doesn't exist in the system"); @@ -393,11 +393,11 @@ public class NetworkMigrationManagerImpl implements NetworkMigrationManager { } } - private void copyFirewallRulesToNewNetwork(Network srcNetwork, long dstNetworkId) { + private void copyFirewallRulesToNewNetwork(Network srcNetwork, Network dstNetwork) { List firewallRules = _firewallDao.listByNetworkPurposeTrafficType(srcNetwork.getId(), FirewallRule.Purpose.Firewall, FirewallRule.TrafficType.Egress); firewallRules.addAll(_firewallDao.listByNetworkPurposeTrafficType(srcNetwork.getId(), FirewallRule.Purpose.Firewall, FirewallRule.TrafficType.Ingress)); if (logger.isDebugEnabled()) { - logger.debug("Copying firewall rules from network with id " + srcNetwork.getId() + " to network with id " + dstNetworkId); + logger.debug("Copying firewall rules from network {} to network {}", srcNetwork, dstNetwork); } //Loop over all the firewall rules in the original network and copy all values to a new firewall rule @@ -408,7 +408,7 @@ public class NetworkMigrationManagerImpl implements NetworkMigrationManager { originalFirewallRule.getSourcePortStart(), originalFirewallRule.getSourcePortEnd(), originalFirewallRule.getProtocol(), - dstNetworkId, + dstNetwork.getId(), srcNetwork.getAccountId(), srcNetwork.getDomainId(), originalFirewallRule.getPurpose(), @@ -613,11 +613,11 @@ public class 
NetworkMigrationManagerImpl implements NetworkMigrationManager { ipAddress.setAssociatedWithNetworkId(networkInNewPhysicalNet.getId()); _ipAddressDao.persist(ipAddress); } else { - _ipAddressManager.disassociatePublicIpAddress(ipAddress.getId(), callerUserId, caller); + _ipAddressManager.disassociatePublicIpAddress(ipAddress, callerUserId, caller); } } - _rulesMgr.applyStaticNatsForNetwork(networkInNewPhysicalNet.getId(), false, networkAccount); + _rulesMgr.applyStaticNatsForNetwork(networkInNewPhysicalNet, false, networkAccount); } private void copyNicDetails(long originalNicId, long dstNicId) { diff --git a/server/src/main/java/com/cloud/network/NetworkModelImpl.java b/server/src/main/java/com/cloud/network/NetworkModelImpl.java index 1276ec22067..5f6cae31699 100644 --- a/server/src/main/java/com/cloud/network/NetworkModelImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkModelImpl.java @@ -635,7 +635,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi } } else { if (network.getCidr() == null) { - logger.debug("Network - " + network.getId() + " has NULL CIDR."); + logger.debug("Network - {} has NULL CIDR.", network); return false; } hasFreeIps = (getAvailableIps(network, null)).size() > 0; @@ -979,7 +979,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi List virtualNetworks = _networksDao.listByZoneAndGuestType(accountId, dataCenterId, GuestType.Isolated, false); if (virtualNetworks.isEmpty()) { - logger.trace("Unable to find default Virtual network account id=" + accountId); + logger.trace("Unable to find default Virtual network for account: {}", () -> _accountDao.findById(accountId)); return null; } @@ -990,7 +990,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi if (networkElementNic != null) { return networkElementNic.getIPv4Address(); } else { - logger.warn("Unable to set find network element for the network id=" + virtualNetwork.getId()); + 
logger.warn("Unable to set find network element for the network {}", virtualNetwork); return null; } } @@ -1224,13 +1224,14 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi Long pNtwkId = null; for (PhysicalNetwork pNtwk : pNtwks) { if (tag == null && pNtwk.getTags().isEmpty()) { - logger.debug("Found physical network id=" + pNtwk.getId() + " with null tag"); + logger.debug("Found physical network {} with null tag", pNtwk); if (pNtwkId != null) { - throw new CloudRuntimeException("There is more than 1 physical network with empty tag in the zone id=" + zoneId); + throw new CloudRuntimeException(String.format("There is more than 1 physical" + + " network with empty tag in the zone %s", _dcDao.findById(zoneId))); } pNtwkId = pNtwk.getId(); } else if (tag != null && pNtwk.getTags().contains(tag)) { - logger.debug("Found physical network id=" + pNtwk.getId() + " based on requested tags " + tag); + logger.debug("Found physical network {} based on requested tags {}", pNtwk, tag); pNtwkId = pNtwk.getId(); break; } @@ -1275,7 +1276,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi physicalNetworkId = findPhysicalNetworkId(network.getDataCenterId(), null, null); } - return isServiceEnabledInNetwork(physicalNetworkId, network.getId(), SecurityGroup); + return isServiceEnabledInNetwork(physicalNetworkId, network, SecurityGroup); } @Override @@ -1327,8 +1328,9 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi } } catch (Exception ex) { if (logger.isDebugEnabled()) { - logger.debug("Failed to retrive the default label for management traffic:" + "zone: " + zoneId + " hypervisor: " + hypervisorType + " due to:" + - ex.getMessage()); + logger.debug("Failed to retrieve the default label for management " + + "traffic: zone: {} hypervisor: {} due to: {}", + () -> _dcDao.findById(zoneId), hypervisorType::toString, ex::getMessage); } } return null; @@ -1356,8 +1358,9 @@ public class 
NetworkModelImpl extends ManagerBase implements NetworkModel, Confi } } catch (Exception ex) { if (logger.isDebugEnabled()) { - logger.debug("Failed to retrive the default label for storage traffic:" + "zone: " + zoneId + " hypervisor: " + hypervisorType + " due to:" + - ex.getMessage()); + logger.debug("Failed to retrieve the default label for storage " + + "traffic: zone: {} hypervisor: {} due to: {}", + () -> _dcDao.findById(zoneId), hypervisorType::toString, ex::getMessage); } } return null; @@ -1393,7 +1396,8 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi public boolean isProviderEnabledInPhysicalNetwork(long physicalNetowrkId, String providerName) { PhysicalNetworkServiceProviderVO ntwkSvcProvider = _pNSPDao.findByServiceProvider(physicalNetowrkId, providerName); if (ntwkSvcProvider == null) { - logger.warn("Unable to find provider " + providerName + " in physical network id=" + physicalNetowrkId); + logger.warn("Unable to find provider {} in physical network {}", + providerName::toString, () -> _physicalNetworkDao.findById(physicalNetowrkId)); return false; } return isProviderEnabled(ntwkSvcProvider); @@ -1697,7 +1701,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi if (network == null) { throw new CloudRuntimeException("cannot check permissions on (Network) "); } - logger.info(String.format("Checking permission for account %s (%s) on network %s (%s)", caller.getAccountName(), caller.getUuid(), network.getName(), network.getUuid())); + logger.info("Checking permission for account {} on network {}", caller, network); if (network.getGuestType() != GuestType.Shared || network.getAclType() == ACLType.Account) { checkAccountNetworkPermissions(caller, network); @@ -1837,7 +1841,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi throw new PermissionDeniedException(String.format("Shared network %s belongs to domain cannot be operated by normal user %s", 
network, owner)); } } else if (owner.getType() != Account.Type.ADMIN) { - throw new PermissionDeniedException(String.format("Shared network %s cannot be operated by account %s with type = %d", network, owner, owner.getType())); + throw new PermissionDeniedException(String.format("Shared network %s cannot be operated by account %s with type = %s", network, owner, owner.getType())); } } @@ -1879,8 +1883,9 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi } } catch (Exception ex) { if (logger.isDebugEnabled()) { - logger.debug("Failed to retrieve the default label for public traffic." + "zone: " + dcId + " hypervisor: " + hypervisorType + " due to: " + - ex.getMessage()); + logger.debug("Failed to retrieve the default label for public " + + "traffic. zone: {} hypervisor: {} due to: {}", + () -> _dcDao.findById(dcId), hypervisorType::toString, ex::getMessage); } } return null; @@ -1908,8 +1913,9 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi } } catch (Exception ex) { if (logger.isDebugEnabled()) { - logger.debug("Failed to retrive the default label for guest traffic:" + "zone: " + dcId + " hypervisor: " + hypervisorType + " due to:" + - ex.getMessage()); + logger.debug("Failed to retrieve the default label for guest " + + "traffic: zone: {} hypervisor: {} due to:{}", + () -> _dcDao.findById(dcId), hypervisorType::toString, ex::getMessage); } } return null; @@ -1981,13 +1987,13 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi Long networkDomainId = null; Network network = getNetwork(networkId); if (network.getGuestType() != GuestType.Shared) { - logger.trace("Network id=" + networkId + " is not shared"); + logger.trace("Network {} is not shared", network); return false; } NetworkDomainVO networkDomainMap = _networkDomainDao.getDomainNetworkMapByNetworkId(networkId); if (networkDomainMap == null) { - logger.trace("Network id=" + networkId + " is shared, but not 
domain specific"); + logger.trace("Network {} is shared, but not domain specific", network); return true; } else { networkDomainId = networkDomainMap.getDomainId(); @@ -2077,17 +2083,18 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi return true; } - boolean isServiceEnabledInNetwork(long physicalNetworkId, long networkId, Service service) { + boolean isServiceEnabledInNetwork(long physicalNetworkId, Network network, Service service) { // check if the service is supported in the network - if (!areServicesSupportedInNetwork(networkId, service)) { - logger.debug("Service " + service.getName() + " is not supported in the network id=" + networkId); + if (!areServicesSupportedInNetwork(network.getId(), service)) { + logger.debug("Service {} is not supported in the network {}", service.getName(), network); return false; } // get provider for the service and check if all of them are supported - String provider = _ntwkSrvcDao.getProviderForServiceInNetwork(networkId, service); + String provider = _ntwkSrvcDao.getProviderForServiceInNetwork(network.getId(), service); if (!isProviderEnabledInPhysicalNetwork(physicalNetworkId, provider)) { - logger.debug("Provider " + provider + " is not enabled in physical network id=" + physicalNetworkId); + logger.debug("Provider {} is not enabled in physical network {}", + provider::toString, () -> _physicalNetworkDao.findById(physicalNetworkId)); return false; } @@ -2111,7 +2118,8 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi } if (networkList.size() > 1) { - logger.info("More than one physical networks exist in zone id=" + zoneId + " with traffic type=" + trafficType + ". 
"); + logger.info("More than one physical networks exist in zone {} with traffic type {}", + () -> _dcDao.findById(zoneId), trafficType::toString); } return networkList.get(0); @@ -2568,7 +2576,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi // The active nics count (nics_count in op_networks table) might be wrong due to some reasons, should check the state of vms as well. // (nics for Starting VMs might not be allocated yet as Starting state also used when vm is being Created) if (_nicDao.countNicsForNonStoppedVms(networkId) > 0 || _nicDao.countNicsForNonStoppedRunningVrs(networkId) > 0) { - logger.debug("Network id=" + networkId + " is not ready for GC as it has vms that are not Stopped at the moment"); + logger.debug("Network {} is not ready for GC as it has vms that are not Stopped at the moment", network); return false; } diff --git a/server/src/main/java/com/cloud/network/NetworkServiceImpl.java b/server/src/main/java/com/cloud/network/NetworkServiceImpl.java index ffb0be83494..c6628e457de 100644 --- a/server/src/main/java/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkServiceImpl.java @@ -717,7 +717,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C throws ResourceAllocationException, InsufficientAddressCapacityException, ConcurrentOperationException { Account caller = CallContext.current().getCallingAccount(); - long callerUserId = CallContext.current().getCallingUserId(); + User callerUser = CallContext.current().getCallingUser(); DataCenter zone = _entityMgr.findById(DataCenter.class, zoneId); if (networkId != null) { @@ -735,9 +735,9 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C if (isSharedNetworkOfferingWithServices(network.getNetworkOfferingId())) { _accountMgr.checkAccess(caller, AccessType.UseEntry, false, network); if (logger.isDebugEnabled()) { - logger.debug("Associate IP address called by 
the user " + callerUserId + " account " + ipOwner.getId()); + logger.debug("Associate IP address called by the user {} account {}", callerUser, ipOwner); } - return _ipAddrMgr.allocateIp(ipOwner, false, caller, callerUserId, zone, displayIp, ipaddress); + return _ipAddrMgr.allocateIp(ipOwner, false, caller, callerUser, zone, displayIp, ipaddress); } else { throw new InvalidParameterValueException("Associate IP address can only be called on the shared networks in the advanced zone" + " with Firewall/Source Nat/Static Nat/Port Forwarding/Load balancing services enabled"); @@ -748,7 +748,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C _accountMgr.checkAccess(caller, null, false, ipOwner); } - IpAddress address = _ipAddrMgr.allocateIp(ipOwner, false, caller, callerUserId, zone, displayIp, ipaddress); + IpAddress address = _ipAddrMgr.allocateIp(ipOwner, false, caller, callerUser, zone, displayIp, ipaddress); if (address != null) { CallContext.current().putContextParameter(IpAddress.class, address.getUuid()); } @@ -782,7 +782,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C if (isSharedNetworkOfferingWithServices(network.getNetworkOfferingId())) { _accountMgr.checkAccess(caller, AccessType.UseEntry, false, network); if (logger.isDebugEnabled()) { - logger.debug("Associate IP address called by the user " + callerUserId + " account " + ipOwner.getId()); + logger.debug("Associate IP address called by the user {} account {}", CallContext.current().getCallingUser(), ipOwner); } return _ipAddrMgr.allocatePortableIp(ipOwner, caller, zoneId, networkId, null); } else { @@ -910,7 +910,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C int maxAllowedIpsPerNic = NumbersUtil.parseInt(_configDao.getValue(Config.MaxNumberOfSecondaryIPsPerNIC.key()), 10); Long nicWiseIpCount = _nicSecondaryIpDao.countByNicId(nicId); if (nicWiseIpCount.intValue() >= maxAllowedIpsPerNic) { - 
logger.error("Maximum Number of Ips \"vm.network.nic.max.secondary.ipaddresses = \"" + maxAllowedIpsPerNic + " per Nic has been crossed for the nic " + nicId + "."); + logger.error("Maximum Number of Ips \"vm.network.nic.max.secondary.ipaddresses = \"{} per Nic has been crossed for the nic {}.", maxAllowedIpsPerNic, nicVO); throw new InsufficientAddressCapacityException("Maximum Number of Ips per Nic has been crossed.", Nic.class, nicId); } @@ -945,10 +945,10 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C ipaddr = _ipAddrMgr.allocatePublicIpForGuestNic(network, podId, ipOwner, ipv4Address); } if (ipaddr == null && ipv6Address == null) { - throw new InvalidParameterValueException("Allocating ip to guest nic " + nicId + " failed"); + throw new InvalidParameterValueException(String.format("Allocating ip to guest nic %s failed", nicVO)); } } catch (InsufficientAddressCapacityException e) { - logger.error("Allocating ip to guest nic " + nicId + " failed"); + logger.error("Allocating ip to guest nic {} failed", nicVO); return null; } } else { @@ -1017,7 +1017,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C NetworkOfferingVO ntwkOff = _networkOfferingDao.findById(network.getNetworkOfferingId()); Long nicId = secIpVO.getNicId(); - logger.debug("ip id = " + ipAddressId + " nic id = " + nicId); + logger.debug("ip = {} nic = {}", secIpVO::toString, () -> _nicDao.findById(nicId)); //check is this the last secondary ip for NIC List ipList = _nicSecondaryIpDao.listByNicId(nicId); boolean lastIp = false; @@ -1031,7 +1031,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C throw new InvalidParameterValueException("Invalid zone Id is given"); } - logger.debug("Calling secondary ip " + secIpVO.getIp4Address() + " release "); + logger.debug("Calling secondary ip {} release ", secIpVO); if (dc.getNetworkType() == NetworkType.Advanced && network.getGuestType() == 
Network.GuestType.Isolated) { //check PF or static NAT is configured on this IP address String secondaryIp = secIpVO.getIp4Address(); @@ -1048,8 +1048,8 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C //check if the secondary IP associated with any static nat rule IPAddressVO publicIpVO = _ipAddressDao.findByIpAndNetworkId(secIpVO.getNetworkId(), secondaryIp); if (publicIpVO != null) { - logger.debug("VM nic IP " + secondaryIp + " is associated with the static NAT rule public IP address id " + publicIpVO.getId()); - throw new InvalidParameterValueException("Can' remove the ip " + secondaryIp + "is associate with static NAT rule public IP address id " + publicIpVO.getId()); + logger.debug("VM nic IP {} is associated with the static NAT rule public IP address id {}", secondaryIp, publicIpVO); + throw new InvalidParameterValueException(String.format("Cannot remove the ip %s as it is associated with static NAT rule public IP address id %s", secondaryIp, publicIpVO)); } if (_loadBalancerDao.isLoadBalancerRulesMappedToVmGuestIp(vm.getId(), secondaryIp, network.getId())) { @@ -1269,7 +1269,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C } if (ipVO.getAllocatedTime() == null) { - logger.debug("Ip Address id= " + ipAddressId + " is not allocated, so do nothing."); + logger.debug("Ip Address {} is not allocated, so do nothing.", ipVO); return true; } @@ -1310,7 +1310,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C return true; } - boolean success = _ipAddrMgr.disassociatePublicIpAddress(ipAddressId, userId, caller); + boolean success = _ipAddrMgr.disassociatePublicIpAddress(ipVO, userId, caller); if (success) { _resourceTagDao.removeByIdAndType(ipAddressId, ResourceObjectType.PublicIpAddress); @@ -1323,7 +1323,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C } } } else { - logger.warn("Failed to release public ip address id=" +
ipAddressId); + logger.warn("Failed to release public ip address {}", ipVO); } return success; } @@ -1668,7 +1668,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C if (zone.getNetworkType() == NetworkType.Advanced && ntwkOff.getGuestType() == GuestType.Isolated) { ipv6 = _networkOfferingDao.isIpv6Supported(ntwkOff.getId()); if (ipv6) { - ip6GatewayCidr = ipv6Service.preAllocateIpv6SubnetForNetwork(zone.getId()); + ip6GatewayCidr = ipv6Service.preAllocateIpv6SubnetForNetwork(zone); ip6Gateway = ip6GatewayCidr.first(); ip6Cidr = ip6GatewayCidr.second(); } @@ -2292,7 +2292,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C Long aclVpcId = acl.getVpcId(); if (!isDefaultAcl(aclId) && isAclAttachedToVpc(aclVpcId, vpcId)) { - throw new InvalidParameterValueException(String.format("ACL [%s] does not belong to the VPC [%s].", aclId, aclVpcId)); + throw new InvalidParameterValueException(String.format("ACL [%s] does not belong to the VPC [%s].", acl, aclVpcId)); } } network = _vpcMgr.createVpcGuestNetwork(networkOfferingId, name, displayText, gateway, cidr, vlanId, networkDomain, owner, sharedDomainId, pNtwk, zoneId, aclType, @@ -2913,9 +2913,9 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C long id = network.getId(); boolean success = _networkMgr.restartNetwork(id, callerAccount, user, cleanup, livePatch); if (success) { - logger.debug(String.format("Network id=%d is restarted successfully.",id)); + logger.debug("Network {} is restarted successfully.", network); } else { - logger.warn(String.format("Network id=%d failed to restart.",id)); + logger.warn("Network {} failed to restart.", network); } return success; @@ -3239,7 +3239,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C String isUpdateDnsSupported = dnsCapabilities.get(Capability.AllowDnsSuffixModification); if (isUpdateDnsSupported == null || 
!Boolean.valueOf(isUpdateDnsSupported)) { // TBD: use uuid instead of networkOfferingId. May need to hardcode tablename in call to addProxyObject(). - throw new InvalidParameterValueException("Domain name change is not supported by the network offering id=" + networkOfferingId); + throw new InvalidParameterValueException(String.format("Domain name change is not supported by the network offering %s", networkOffering)); } network.setNetworkDomain(domainSuffix); @@ -3427,7 +3427,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C if (restartNetwork) { if (validStateToShutdown) { if (!changeCidr) { - logger.debug("Shutting down elements and resources for network id=" + networkId + " as a part of network update"); + logger.debug("Shutting down elements and resources for network {} as a part of network update", network); if (!_networkMgr.shutdownNetworkElementsAndResources(context, true, network)) { logger.warn("Failed to shutdown the network elements and resources as a part of network restart: " + network); @@ -3437,12 +3437,11 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C } } else { // We need to shutdown the network, since we want to re-implement the network. 
- logger.debug("Shutting down network id=" + networkId + " as a part of network update"); + logger.debug("Shutting down network {} as a part of network update", network); //check if network has reservation if (NetUtils.isNetworkAWithinNetworkB(network.getCidr(), network.getNetworkCidr())) { - logger.warn( - "Existing IP reservation will become ineffective for the network with id = " + networkId + " You need to reapply reservation after network reimplementation."); + logger.warn("Existing IP reservation will become ineffective for the network {} You need to reapply reservation after network reimplementation.", network); //set cidr to the network cidr network.setCidr(network.getNetworkCidr()); //set networkCidr to null to bring network back to no IP reservation state @@ -3494,7 +3493,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C long vmId = nic.getInstanceId(); VMInstanceVO vm = _vmDao.findById(vmId); if (vm == null) { - logger.error("Vm for nic " + nic.getId() + " not found with Vm Id:" + vmId); + logger.error("Vm for nic {} not found with Vm Id: {}", nic, vmId); continue; } long isDefault = (nic.isDefaultNic()) ? 
1 : 0; @@ -3685,7 +3684,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C } else if (!isIpv6Supported && isIpv6SupportedNew) { Pair ip6GatewayCidr; try { - ip6GatewayCidr = ipv6Service.preAllocateIpv6SubnetForNetwork(network.getDataCenterId()); + ip6GatewayCidr = ipv6Service.preAllocateIpv6SubnetForNetwork(_dcDao.findById(network.getDataCenterId())); ipv6Service.assignIpv6SubnetToNetwork(ip6GatewayCidr.second(), network.getId()); } catch (ResourceAllocationException ex) { throw new CloudRuntimeException("unable to allocate IPv6 network", ex); @@ -3937,7 +3936,8 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C Vpc migratedVpc = _vpcDao.findById(migratedVpcId); if (migratedVpc.getVpcOfferingId() != vpcOfferingId) { logger.error("The vpc is already partially migrated in a previous run. The provided vpc offering is not the same as the one used during the first migration process."); - throw new InvalidParameterValueException("Failed to resume migrating VPC as VPC offering does not match previously specified VPC offering (" + migratedVpc.getVpcOfferingId() + ")"); + throw new InvalidParameterValueException(String.format("Failed to resume migrating VPC as VPC offering does not match previously specified VPC offering (%s)", + _vpcOfferingDao.findById(migratedVpc.getVpcOfferingId()))); } List migratedTiers = _networksDao.listByVpc(migratedVpcId); @@ -4041,19 +4041,19 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C // Type of the network should be the same if (oldNetworkOffering.getGuestType() != newNetworkOffering.getGuestType()) { - logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " are of different types, can't upgrade"); + logger.debug("Network offerings {} and {} are of different types, can't upgrade", newNetworkOffering, oldNetworkOffering); return false; } // Traffic types should be the same if 
(oldNetworkOffering.getTrafficType() != newNetworkOffering.getTrafficType()) { - logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different traffic types, can't upgrade"); + logger.debug("Network offerings {} and {} have different traffic types, can't upgrade", newNetworkOffering, oldNetworkOffering); return false; } // specify ipRanges should be the same if (oldNetworkOffering.isSpecifyIpRanges() != newNetworkOffering.isSpecifyIpRanges()) { - logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different values for specifyIpRangess, can't upgrade"); + logger.debug("Network offerings {} and {} have different values for specifyIpRangess, can't upgrade", newNetworkOffering, oldNetworkOffering); return false; } @@ -4088,26 +4088,26 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C // security group service should be the same if (areServicesSupportedByNetworkOffering(oldNetworkOfferingId, Service.SecurityGroup) != areServicesSupportedByNetworkOffering(newNetworkOfferingId, Service.SecurityGroup)) { - logger.debug("Offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different securityGroupProperty, can't upgrade"); + logger.debug("Offerings {} and {} have different securityGroupProperty, can't upgrade", newNetworkOffering, oldNetworkOffering); return false; } // tags should be the same if (newNetworkOffering.getTags() != null) { if (oldNetworkOffering.getTags() == null) { - logger.debug("New network offering id=" + newNetworkOfferingId + " has tags and old network offering id=" + oldNetworkOfferingId + " doesn't, can't upgrade"); + logger.debug("New network offering id={} has tags and old network offering id={} doesn't, can't upgrade", newNetworkOffering, oldNetworkOffering); return false; } if (!com.cloud.utils.StringUtils.areTagsEqual(oldNetworkOffering.getTags(), newNetworkOffering.getTags())) { - 
logger.debug("Network offerings " + newNetworkOffering.getUuid() + " and " + oldNetworkOffering.getUuid() + " have different tags, can't upgrade"); + logger.debug("Network offerings {} and {} have different tags, can't upgrade", newNetworkOffering, oldNetworkOffering); return false; } } // specify vlan should be the same if (oldNetworkOffering.isSpecifyVlan() != newNetworkOffering.isSpecifyVlan()) { - logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different values for specifyVlan, can't upgrade"); + logger.debug("Network offerings {} and {} have different values for specifyVlan, can't upgrade", newNetworkOffering, oldNetworkOffering); return false; } @@ -4115,7 +4115,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C NetworkOffering.NetworkMode oldNetworkMode = oldNetworkOffering.getNetworkMode() == null ? NetworkOffering.NetworkMode.NATTED: oldNetworkOffering.getNetworkMode(); NetworkOffering.NetworkMode newNetworkMode = newNetworkOffering.getNetworkMode() == null ? NetworkOffering.NetworkMode.NATTED: newNetworkOffering.getNetworkMode(); if (!oldNetworkMode.equals(newNetworkMode)) { - logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different values for network mode, can't upgrade"); + logger.debug("Network offerings {} and {} have different values for network mode, can't upgrade", newNetworkOffering, oldNetworkOffering); return false; } @@ -4140,7 +4140,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C if (Grouping.AllocationState.Enabled == zone.getAllocationState()) { // TBD: Send uuid instead of zoneId; may have to hardcode tablename in call to addProxyObject(). 
- throw new PermissionDeniedException("Cannot create PhysicalNetwork since the Zone is currently enabled, zone Id: " + zoneId); + throw new PermissionDeniedException(String.format("Cannot create PhysicalNetwork since the Zone is currently enabled, zone: %s", zone)); } NetworkType zoneType = zone.getNetworkType(); @@ -4148,7 +4148,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C if (zoneType == NetworkType.Basic) { if (!_physicalNetworkDao.listByZone(zoneId).isEmpty()) { // TBD: Send uuid instead of zoneId; may have to hardcode tablename in call to addProxyObject(). - throw new CloudRuntimeException("Cannot add the physical network to basic zone id: " + zoneId + ", there is a physical network already existing in this basic Zone"); + throw new CloudRuntimeException(String.format("Cannot add the physical network to basic zone: %s, there is a physical network already existing in this basic Zone", zone)); } } if (tags != null && tags.size() > 1) { @@ -4399,15 +4399,14 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { + DataCenterVO zone = _dcDao.findById(network.getDataCenterId()); if (addVnetsFinal != null) { - logger.debug("Adding vnet range " + addVnetsFinal.toString() + " for the physicalNetwork id= " + network.getId() + " and zone id=" + network.getDataCenterId() - + " as a part of updatePhysicalNetwork call"); + logger.debug("Adding vnet range {} for the physicalNetwork {} and zone {} as a part of updatePhysicalNetwork call", addVnetsFinal.toString(), network, zone); //add vnet takes a list of strings to be added. each string is a vnet. 
_dcDao.addVnet(network.getDataCenterId(), network.getId(), addVnetsFinal); } if (removeVnetsFinal != null) { - logger.debug("removing vnet range " + removeVnetsFinal.toString() + " for the physicalNetwork id= " + network.getId() + " and zone id=" + network.getDataCenterId() - + " as a part of updatePhysicalNetwork call"); + logger.debug("removing vnet range {} for the physicalNetwork {} and zone {} as a part of updatePhysicalNetwork call", removeVnetsFinal.toString(), network, zone); //deleteVnets takes a list of strings to be removed. each string is a vnet. _dcVnetDao.deleteVnets(TransactionLegacy.currentTxn(), network.getDataCenterId(), network.getId(), removeVnetsFinal); } @@ -4443,14 +4442,15 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C maxVnet = MAX_VXLAN_VNI; // fail if zone already contains VNI, need to be unique per zone. // since adding a range adds each VNI to the database, need only check min/max + DataCenterVO zone = _dcDao.findById(network.getDataCenterId()); for (String vnet : VnetRange) { - logger.debug("Looking to see if VNI " + vnet + " already exists on another network in zone " + network.getDataCenterId()); + logger.debug("Looking to see if VNI {} already exists on another network in zone {}", vnet, zone); List vnis = _dcVnetDao.findVnet(network.getDataCenterId(), vnet); if (vnis != null && !vnis.isEmpty()) { for (DataCenterVnetVO vni : vnis) { if (vni.getPhysicalNetworkId() != network.getId()) { - logger.debug("VNI " + vnet + " already exists on another network in zone, please specify a unique range"); - throw new InvalidParameterValueException("VNI " + vnet + " already exists on another network in zone, please specify a unique range"); + logger.debug("VNI {} already exists on another network in zone ({}), please specify a unique range", vnet, zone); + throw new InvalidParameterValueException(String.format("VNI %s already exists on another network in zone (%s), please specify a unique range", vnet, zone)); 
} } } @@ -4489,9 +4489,10 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C } public void validateIfServiceOfferingIsActiveAndSystemVmTypeIsDomainRouter(final Long serviceOfferingId) { - logger.debug(String.format("Validating if service offering [%s] is active, and if system VM is of Domain Router type.", serviceOfferingId)); final ServiceOfferingVO serviceOffering = serviceOfferingDao.findById(serviceOfferingId); + logger.debug(String.format("Validating if service offering (%s) with id %d is active, and if system VM is of Domain Router type.", serviceOffering, serviceOfferingId)); + if (serviceOffering == null) { throw new InvalidParameterValueException(String.format("Could not find specified service offering [%s].", serviceOfferingId)); } @@ -4545,7 +4546,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C removeVnets.addAll(vnetsInDb); int allocated_vnets = _dcVnetDao.countAllocatedVnets(network.getId()); if (allocated_vnets > 0) { - throw new InvalidParameterValueException("physicalnetwork " + network.getId() + " has " + allocated_vnets + " vnets in use"); + throw new InvalidParameterValueException(String.format("physicalnetwork %s has %d vnets in use", network, allocated_vnets)); } return removeVnets; } @@ -4569,7 +4570,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C _dcVnetDao.lockRange(network.getDataCenterId(), network.getId(), start, end); List result = _dcVnetDao.listAllocatedVnetsInRange(network.getDataCenterId(), network.getId(), start, end); if (!result.isEmpty()) { - throw new InvalidParameterValueException("physicalnetwork " + network.getId() + " has allocated vnets in the range " + start + "-" + end); + throw new InvalidParameterValueException(String.format("physicalnetwork %s has allocated vnets in the range %d-%d", network, start, end)); } // If the range is partially dedicated to an account fail the request @@ -4640,11 +4641,8 @@ public class 
NetworkServiceImpl extends ManagerBase implements NetworkService, C for (PhysicalNetworkServiceProviderVO provider : providers) { try { deleteNetworkServiceProvider(provider.getId()); - } catch (ResourceUnavailableException e) { - logger.warn("Unable to complete destroy of the physical network provider: " + provider.getProviderName() + ", id: " + provider.getId(), e); - return false; - } catch (ConcurrentOperationException e) { - logger.warn("Unable to complete destroy of the physical network provider: " + provider.getProviderName() + ", id: " + provider.getId(), e); + } catch (ResourceUnavailableException | ConcurrentOperationException e) { + logger.warn("Unable to complete destroy of the physical network provider: {}", provider, e); return false; } } @@ -4785,7 +4783,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C } else if (!physicalNetwork.getIsolationMethods().isEmpty() && !physicalNetwork.getIsolationMethods().contains("VLAN") && !physicalNetwork.getIsolationMethods().contains("VXLAN")) { - throw new InvalidParameterValueException("Cannot dedicate guest vlan range. " + "Physical isolation type of network " + physicalNetworkId + " is not VLAN nor VXLAN"); + throw new InvalidParameterValueException(String.format("Cannot dedicate guest vlan range. Physical isolation type of network %s is not VLAN nor VXLAN", physicalNetwork)); } // Get the start and end vlan @@ -5052,8 +5050,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C } if (_pNSPDao.findByServiceProvider(physicalNetworkId, providerName) != null) { - // TBD: send uuid instead of physicalNetworkId. 
- throw new CloudRuntimeException("The '" + providerName + "' provider already exists on physical network : " + physicalNetworkId); + throw new CloudRuntimeException(String.format("The '%s' provider already exists on physical network : %s", providerName, network)); } // check if services can be turned off @@ -5164,7 +5161,8 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C if (state != null) { if (logger.isDebugEnabled()) { - logger.debug("trying to update the state of the service provider id=" + id + " on physical network: " + provider.getPhysicalNetworkId() + " to state: " + stateStr); + logger.debug("trying to update the state of the service provider {} on physical network: {} to state: {}", + provider::toString, () -> _physicalNetworkDao.findById(provider.getPhysicalNetworkId()), stateStr::toString); } switch (state) { case Enabled: @@ -5231,7 +5229,8 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C // shutdown the provider instances ReservationContext context = new ReservationContextImpl(null, null, callerUser, callerAccount); if (logger.isDebugEnabled()) { - logger.debug("Shutting down the service provider id=" + id + " on physical network: " + provider.getPhysicalNetworkId()); + logger.debug("Shutting down the service provider {} on physical network: {}", + provider::toString, () -> _physicalNetworkDao.findById(provider.getPhysicalNetworkId())); } NetworkElement element = _networkModel.getElementImplementingProvider(provider.getProviderName()); if (element == null) { @@ -5291,8 +5290,8 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C } if (networkWithoutTagCount > 0) { logger.error("Number of physical networks without tags are " + networkWithoutTagCount); - throw new CloudRuntimeException("There are more than 1 physical network without tags in the zone= " + - physicalNetwork.getDataCenterId()); + throw new CloudRuntimeException(String.format("There are 
more than 1 physical network without tags in the zone: %s", + _dcDao.findById(physicalNetwork.getDataCenterId()))); } } @@ -5533,7 +5532,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C } OvsProviderVO element = _ovsProviderDao.findByNspId(nsp.getId()); if (element != null) { - logger.debug("There is already a Ovs element with service provider id " + nsp.getId()); + logger.debug("There is already a Ovs element with service provider {}", nsp); return nsp; } element = new OvsProviderVO(nsp.getId()); @@ -5799,8 +5798,8 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C } else { logger.debug("Private network already exists: " + privateNetwork); //Do not allow multiple private gateways with same Vlan within a VPC - throw new InvalidParameterValueException("Private network for the vlan: " + uriString + " and cidr " + cidr + " already exists " + "for Vpc " + vpcId + " in zone " - + _entityMgr.findById(DataCenter.class, pNtwk.getDataCenterId()).getName()); + throw new InvalidParameterValueException(String.format("Private network for the vlan: %s and cidr %s already exists for Vpc %s in zone %s", + uriString, cidr, _vpcDao.findById(vpcId), _entityMgr.findById(DataCenter.class, pNtwk.getDataCenterId()).getName())); } if (vpcId != null) { //add entry to private_ip_address table @@ -5930,10 +5929,10 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C throw new InvalidParameterValueException("Invalid zone Id "); } if (_accountMgr.checkAccessAndSpecifyAuthority(caller, zone.getId()) != zone.getId()) { - throw new InvalidParameterValueException("Caller does not have permission for this Zone" + "(" + zoneId + ")"); + throw new InvalidParameterValueException(String.format("Caller does not have permission for this Zone (%s)", zone)); } if (logger.isDebugEnabled()) { - logger.debug("Associate IP address called by the user " + callerUserId + " account " + ipOwner.getId()); + 
logger.debug("Associate IP address called by the user {} account {}", CallContext.current().getCallingUser(), ipOwner); } return _ipAddrMgr.allocatePodIp(zoneId, podId); @@ -6128,7 +6127,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C } if (!_projectMgr.canAccessProjectAccount(caller, project.getProjectAccountId())) { - throw new InvalidParameterValueException("Account " + caller + " can't access project id=" + projectId); + throw new InvalidParameterValueException(String.format("Account %s can't access project id=%s", caller, project.getUuid())); } accountIds.add(project.getProjectAccountId()); } diff --git a/server/src/main/java/com/cloud/network/NetworkUsageManagerImpl.java b/server/src/main/java/com/cloud/network/NetworkUsageManagerImpl.java index 59e21dc9c77..0aee5f23496 100644 --- a/server/src/main/java/com/cloud/network/NetworkUsageManagerImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkUsageManagerImpl.java @@ -132,16 +132,13 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage long zoneId = cmd.getZoneId(); DataCenterVO zone = _dcDao.findById(zoneId); - String zoneName; if (zone == null) { throw new InvalidParameterValueException("Could not find zone with ID: " + zoneId); - } else { - zoneName = zone.getName(); } List trafficMonitorsInZone = _resourceMgr.listAllHostsInOneZoneByType(Host.Type.TrafficMonitor, zoneId); if (trafficMonitorsInZone.size() != 0) { - throw new InvalidParameterValueException("Already added an traffic monitor in zone: " + zoneName); + throw new InvalidParameterValueException(String.format("Already added an traffic monitor in zone: %s", zone)); } URI uri; @@ -266,19 +263,24 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage } @Override - @DB public boolean processAnswers(long agentId, long seq, Answer[] answers) { + return processAnswers(agentId, null, null, seq, answers); + } + + @Override + @DB + public boolean 
processAnswers(long agentId, String uuid, String name, long seq, Answer[] answers) { /* * Do not collect Direct Network usage stats if the Traffic Monitor is not owned by this mgmt server */ HostVO host = _hostDao.findById(agentId); if (host != null) { if ((host.getManagementServerId() == null) || (mgmtSrvrId != host.getManagementServerId())) { - logger.warn("Not the owner. Not collecting Direct Network usage from TrafficMonitor : " + agentId); + logger.warn("Not the owner. Not collecting Direct Network usage from TrafficMonitor : {}", host); return false; } } else { - logger.warn("Agent not found. Not collecting Direct Network usage from TrafficMonitor : " + agentId); + logger.warn("Agent not found. Not collecting Direct Network usage from TrafficMonitor [id: {}, uuid: {}, name: {}]", agentId, uuid, name); return false; } @@ -303,7 +305,7 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage final long zoneId = host.getDataCenterId(); final DetailVO lastCollectDetail = _detailsDao.findDetail(host.getId(), "last_collection"); if (lastCollectDetail == null) { - logger.warn("Last collection time not available. Skipping direct usage collection for Traffic Monitor: " + host.getId()); + logger.warn("Last collection time not available. Skipping direct usage collection for Traffic Monitor: {}", host); return false; } Date lastCollection = new Date(Long.parseLong(lastCollectDetail.getValue())); @@ -377,7 +379,7 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage DirectNetworkUsageAnswer answer = (DirectNetworkUsageAnswer)_agentMgr.easySend(host.getId(), cmd); if (answer == null || !answer.getResult()) { String details = (answer != null) ?
answer.getDetails() : "details unavailable"; - String msg = "Unable to get network usage stats from " + host.getId() + " due to: " + details + "."; + String msg = String.format("Unable to get network usage stats from %s due to: %s.", host, details); logger.error(msg); return false; } else { @@ -410,7 +412,7 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage DirectNetworkUsageAnswer answer = (DirectNetworkUsageAnswer)_agentMgr.easySend(host.getId(), cmd); if (answer == null || !answer.getResult()) { String details = (answer != null) ? answer.getDetails() : "details unavailable"; - String msg = "Unable to get network usage stats from " + host.getId() + " due to: " + details + "."; + String msg = String.format("Unable to get network usage stats from %s due to: %s.", host, details); logger.error(msg); return false; } else { @@ -475,8 +477,13 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage @Override public boolean processDisconnect(long agentId, Status state) { + return processDisconnect(agentId, null, null, state); + } + + @Override + public boolean processDisconnect(long agentId, String uuid, String name, Status state) { if (logger.isDebugEnabled()) { - logger.debug("Disconnected called on " + agentId + " with status " + state.toString()); + logger.debug("Disconnected called on [id: {}, uuid: {}, name: {}] with status {}", agentId, uuid, name, state.toString()); } return true; } @@ -488,13 +495,12 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage @Override public void processConnect(Host agent, StartupCommand cmd, boolean forRebalance) { if (cmd instanceof StartupTrafficMonitorCommand) { - long agentId = agent.getId(); - logger.debug("Sending RecurringNetworkUsageCommand to " + agentId); + logger.debug("Sending RecurringNetworkUsageCommand to {}", agent); RecurringNetworkUsageCommand watch = new RecurringNetworkUsageCommand(_interval); try { - _agentMgr.send(agentId, 
new Commands(watch), this); + _agentMgr.send(agent.getId(), new Commands(watch), this); } catch (AgentUnavailableException e) { - logger.debug("Can not process connect for host " + agentId, e); + logger.debug("Can not process connect for host {}", agent, e); } } return; diff --git a/server/src/main/java/com/cloud/network/SshKeysDistriMonitor.java b/server/src/main/java/com/cloud/network/SshKeysDistriMonitor.java index d922f8d0018..06ccc1a63f7 100644 --- a/server/src/main/java/com/cloud/network/SshKeysDistriMonitor.java +++ b/server/src/main/java/com/cloud/network/SshKeysDistriMonitor.java @@ -59,9 +59,14 @@ public class SshKeysDistriMonitor implements Listener { } @Override - public synchronized boolean processDisconnect(long agentId, Status state) { + public boolean processDisconnect(long agentId, Status state) { + return processDisconnect(agentId, null, null, state); + } + + @Override + public synchronized boolean processDisconnect(long agentId, String uuid, String name, Status state) { if (logger.isTraceEnabled()) - logger.trace("Agent disconnected, agent id: " + agentId + ", state: " + state + ". Will notify waiters"); + logger.trace("Agent disconnected, agent [id: {}, uuid: {}, name: {}, state: {}]. Will notify waiters", agentId, uuid, name, state); return true; } @@ -93,7 +98,7 @@ public class SshKeysDistriMonitor implements Listener { Commands c = new Commands(cmds); _agentMgr.send(host.getId(), c, this); } catch (AgentUnavailableException e) { - logger.debug("Failed to send keys to agent: " + host.getId()); + logger.debug("Failed to send keys to agent: {}", host); } } } diff --git a/server/src/main/java/com/cloud/network/as/AutoScaleManager.java b/server/src/main/java/com/cloud/network/as/AutoScaleManager.java index cf6aab6a7bb..04d4c8d2d62 100644 --- a/server/src/main/java/com/cloud/network/as/AutoScaleManager.java +++ b/server/src/main/java/com/cloud/network/as/AutoScaleManager.java @@ -16,6 +16,7 @@ // under the License. 
package com.cloud.network.as; +import com.cloud.user.Account; import org.apache.cloudstack.framework.config.ConfigKey; public interface AutoScaleManager extends AutoScaleService { @@ -40,9 +41,9 @@ public interface AutoScaleManager extends AutoScaleService { void checkAutoScaleUser(Long autoscaleUserId, long accountId); - boolean deleteAutoScaleVmGroupsByAccount(Long accountId); + boolean deleteAutoScaleVmGroupsByAccount(Account account); - void cleanUpAutoScaleResources(Long accountId); + void cleanUpAutoScaleResources(Account account); void doScaleUp(long groupId, Integer numVm); diff --git a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java index 5e7a4a0c4ef..3d3a28d1404 100644 --- a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java +++ b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java @@ -590,7 +590,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage } profileVO = checkValidityAndPersist(profileVO, true); - logger.info("Successfully create AutoScale Vm Profile with Id: " + profileVO.getId()); + logger.info("Successfully create AutoScale Vm Profile: {}", profileVO); return profileVO; } @@ -671,7 +671,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage } vmProfile = checkValidityAndPersist(vmProfile, false); - logger.info("Updated Auto Scale Vm Profile id:" + vmProfile.getId()); + logger.info("Updated Auto Scale Vm Profile: {}", vmProfile); return vmProfile; } @@ -680,14 +680,14 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage @ActionEvent(eventType = EventTypes.EVENT_AUTOSCALEVMPROFILE_DELETE, eventDescription = "deleting autoscale vm profile") public boolean deleteAutoScaleVmProfile(long id) { /* Check if entity is in database */ - getEntityInDatabase(CallContext.current().getCallingAccount(), "AutoScale Vm Profile", id, 
autoScaleVmProfileDao); + AutoScaleVmProfileVO vmProfile = getEntityInDatabase(CallContext.current().getCallingAccount(), "AutoScale Vm Profile", id, autoScaleVmProfileDao); if (autoScaleVmGroupDao.isProfileInUse(id)) { throw new InvalidParameterValueException("Cannot delete AutoScale Vm Profile when it is in use by one more vm groups"); } boolean success = autoScaleVmProfileDao.remove(id); if (success) { - logger.info("Successfully deleted AutoScale Vm Profile with Id: " + id); + logger.info("Successfully deleted AutoScale Vm Profile: {}", vmProfile); } return success; } @@ -821,7 +821,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage AutoScalePolicyVO policyVO = new AutoScalePolicyVO(cmd.getName(), cmd.getDomainId(), cmd.getAccountId(), duration, quietTime, null, scaleAction); policyVO = checkValidityAndPersist(policyVO, cmd.getConditionIds()); - logger.info("Successfully created AutoScale Policy with Id: " + policyVO.getId()); + logger.info("Successfully created AutoScale Policy: {}", policyVO); return policyVO; } @@ -830,7 +830,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage @ActionEvent(eventType = EventTypes.EVENT_AUTOSCALEPOLICY_DELETE, eventDescription = "deleting autoscale policy") public boolean deleteAutoScalePolicy(final long id) { /* Check if entity is in database */ - getEntityInDatabase(CallContext.current().getCallingAccount(), "AutoScale Policy", id, autoScalePolicyDao); + AutoScalePolicyVO policy = getEntityInDatabase(CallContext.current().getCallingAccount(), "AutoScale Policy", id, autoScalePolicyDao); if (autoScaleVmGroupPolicyMapDao.isAutoScalePolicyInUse(id)) { throw new InvalidParameterValueException("Cannot delete AutoScale Policy when it is in use by one or more AutoScale Vm Groups"); @@ -850,7 +850,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage logger.warn("Failed to remove AutoScale Policy Condition mappings"); return false; } 
- logger.info("Successfully deleted autoscale policy id : " + id); + logger.info("Successfully deleted autoscale policy: {}", policy); return success; } @@ -1006,7 +1006,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage } policy = checkValidityAndPersist(policy, conditionIds); - logger.info("Successfully updated Auto Scale Policy id:" + policyId); + logger.info("Successfully updated Auto Scale Policy: {}", policy); if (CollectionUtils.isNotEmpty(conditionIds)) { markStatisticsAsInactive(null, policyId); @@ -1049,7 +1049,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage } vmGroupVO = checkValidityAndPersist(vmGroupVO, cmd.getScaleUpPolicyIds(), cmd.getScaleDownPolicyIds()); - logger.info("Successfully created Autoscale Vm Group with Id: " + vmGroupVO.getId()); + logger.info("Successfully created Autoscale Vm Group: {}", vmGroupVO); createInactiveDummyRecord(vmGroupVO.getId()); scheduleMonitorTask(vmGroupVO.getId()); @@ -1097,12 +1097,12 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage } if (!autoScaleVmGroupVO.getState().equals(AutoScaleVmGroup.State.DISABLED) && !Boolean.TRUE.equals(cleanup)) { - throw new InvalidParameterValueException(String.format("Cannot delete autoscale vm group id : %d because it is in %s state. Please disable it or pass cleanup=true flag which will destroy all VMs.", id, autoScaleVmGroupVO.getState())); + throw new InvalidParameterValueException(String.format("Cannot delete autoscale vm group : %s because it is in %s state. Please disable it or pass cleanup=true flag which will destroy all VMs.", autoScaleVmGroupVO, autoScaleVmGroupVO.getState())); } Integer currentVM = autoScaleVmGroupVmMapDao.countByGroup(id); if (currentVM > 0 && !Boolean.TRUE.equals(cleanup)) { - throw new InvalidParameterValueException(String.format("Cannot delete autoscale vm group id : %d because there are %d VMs. 
Please remove the VMs or pass cleanup=true flag which will destroy all VMs.", id, currentVM)); + throw new InvalidParameterValueException(String.format("Cannot delete autoscale vm group : %s because there are %d VMs. Please remove the VMs or pass cleanup=true flag which will destroy all VMs.", autoScaleVmGroupVO, currentVM)); } AutoScaleVmGroup.State bakupState = autoScaleVmGroupVO.getState(); @@ -1125,7 +1125,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage autoScaleVmGroupDao.persist(autoScaleVmGroupVO); } finally { if (!success) { - logger.warn("Could not delete AutoScale Vm Group id : " + id); + logger.warn("Could not delete AutoScale Vm Group : {}", autoScaleVmGroupVO); return false; } } @@ -1163,7 +1163,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage return false; } - logger.info("Successfully deleted autoscale vm group id : " + id); + logger.info("Successfully deleted autoscale vm group: {}", autoScaleVmGroupVO); return success; // Successfull } }); @@ -1368,7 +1368,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage vmGroupVO = checkValidityAndPersist(vmGroupVO, scaleUpPolicyIds, scaleDownPolicyIds); if (vmGroupVO != null) { - logger.debug("Updated Auto Scale VmGroup id:" + vmGroupId); + logger.debug("Updated Auto Scale VmGroup: {}", vmGroupVO); if ((interval != null && interval != currentInterval) || CollectionUtils.isNotEmpty(scaleUpPolicyIds) || CollectionUtils.isNotEmpty(scaleDownPolicyIds)) { markStatisticsAsInactive(vmGroupId, null); @@ -1404,10 +1404,10 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage autoScaleVmGroupDao.persist(vmGroup); } finally { if (!success) { - logger.warn("Failed to enable AutoScale Vm Group id : " + id); + logger.warn("Failed to enable AutoScale Vm Group: {}", vmGroup); return null; } - logger.info("Successfully enabled AutoScale Vm Group with Id:" + id); + logger.info("Successfully 
enabled AutoScale Vm Group: {}", vmGroup); createInactiveDummyRecord(vmGroup.getId()); } return vmGroup; @@ -1439,10 +1439,10 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage autoScaleVmGroupDao.persist(vmGroup); } finally { if (!success) { - logger.warn("Failed to disable AutoScale Vm Group id : " + id); + logger.warn("Failed to disable AutoScale Vm Group: {}", vmGroup); return null; } - logger.info("Successfully disabled AutoScale Vm Group with Id:" + id); + logger.info("Successfully disabled AutoScale Vm Group: {}", vmGroup); } return vmGroup; } @@ -1505,7 +1505,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage ConditionVO condition = null; condition = conditionDao.persist(new ConditionVO(cid, threshold, owner.getAccountId(), owner.getDomainId(), op)); - logger.info("Successfully created condition with Id: " + condition.getId()); + logger.info("Successfully created condition: {}", condition); CallContext.current().setEventDetails(" Id: " + condition.getId()); return condition; @@ -1581,13 +1581,13 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage ConditionVO condition = conditionDao.findByCounterId(counterId); if (condition != null) { - logger.info("Cannot delete counter " + counter.getName() + " as it is being used in a condition."); + logger.info("Cannot delete counter {} as it is being used in a condition.", counter); throw new ResourceInUseException("Counter is in use."); } boolean success = counterDao.remove(counterId); if (success) { - logger.info("Successfully deleted counter with Id: " + counterId); + logger.info("Successfully deleted counter: {}", counter); } return success; @@ -1604,12 +1604,12 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage // Verify if condition is used in any autoscale policy if (autoScalePolicyConditionMapDao.isConditionInUse(conditionId)) { - logger.info("Cannot delete condition " + 
conditionId + " as it is being used in a condition."); + logger.info("Cannot delete condition {} as it is being used in a condition.", condition); throw new ResourceInUseException("Cannot delete Condition when it is in use by one or more AutoScale Policies."); } boolean success = conditionDao.remove(conditionId); if (success) { - logger.info("Successfully deleted condition " + condition.getId()); + logger.info("Successfully deleted condition {}", condition); } return success; } @@ -1656,7 +1656,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage sc2.setJoinParameters("policySearch", "policyId", policyIds.toArray((new Object[policyIds.size()]))); List groups = autoScaleVmGroupDao.search(sc2, null); if (CollectionUtils.isNotEmpty(groups)) { - String msg = String.format("Cannot update condition %d as it is being used in %d vm groups NOT in Disabled state.", conditionId, groups.size()); + String msg = String.format("Cannot update condition %s as it is being used in %d vm groups NOT in Disabled state.", condition, groups.size()); logger.info(msg); throw new ResourceInUseException(msg); } @@ -1666,7 +1666,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage condition.setThreshold(threshold); boolean success = conditionDao.update(conditionId, condition); if (success) { - logger.info("Successfully updated condition " + condition.getId()); + logger.info("Successfully updated condition {}", condition); for (Long policyId : policyIds) { markStatisticsAsInactive(null, policyId); @@ -1676,16 +1676,16 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage } @Override - public boolean deleteAutoScaleVmGroupsByAccount(Long accountId) { + public boolean deleteAutoScaleVmGroupsByAccount(Account account) { boolean success = true; - List groups = autoScaleVmGroupDao.listByAccount(accountId); + List groups = autoScaleVmGroupDao.listByAccount(account.getId()); for (AutoScaleVmGroupVO group : 
groups) { - logger.debug("Deleting AutoScale Vm Group " + group + " for account Id: " + accountId); + logger.debug("Deleting AutoScale Vm Group {} for account: {}", group, account); try { deleteAutoScaleVmGroup(group.getId(), true); - logger.debug("AutoScale Vm Group " + group + " has been successfully deleted for account Id: " + accountId); + logger.debug("AutoScale Vm Group {} has been successfully deleted for account: {}", group, account); } catch (Exception e) { - logger.warn("Failed to delete AutoScale Vm Group " + group + " for account Id: " + accountId + " due to: ", e); + logger.warn("Failed to delete AutoScale Vm Group {} for account: {} due to: ", group, account, e); success = false; } } @@ -1693,20 +1693,20 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage } @Override - public void cleanUpAutoScaleResources(Long accountId) { + public void cleanUpAutoScaleResources(Account account) { // cleans Autoscale VmProfiles, AutoScale Policies and Conditions belonging to an account int count = 0; - count = autoScaleVmProfileDao.removeByAccountId(accountId); + count = autoScaleVmProfileDao.removeByAccountId(account.getId()); if (count > 0) { - logger.debug("Deleted " + count + " AutoScale Vm Profile for account Id: " + accountId); + logger.debug("Deleted {} AutoScale Vm Profile for account: {}", count, account); } - count = autoScalePolicyDao.removeByAccountId(accountId); + count = autoScalePolicyDao.removeByAccountId(account.getId()); if (count > 0) { - logger.debug("Deleted " + count + " AutoScale Policies for account Id: " + accountId); + logger.debug("Deleted {} AutoScale Policies for account: {}", count, account); } - count = conditionDao.removeByAccountId(accountId); + count = conditionDao.removeByAccountId(account.getId()); if (count > 0) { - logger.debug("Deleted " + count + " Conditions for account Id: " + accountId); + logger.debug("Deleted {} Conditions for account: {}", count, account); } } @@ -1743,12 +1743,12 @@ public 
class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage return deployParams; } - protected long createNewVM(AutoScaleVmGroupVO asGroup) { + protected UserVm createNewVM(AutoScaleVmGroupVO asGroup) { AutoScaleVmProfileVO profileVo = autoScaleVmProfileDao.findById(asGroup.getProfileId()); long templateId = profileVo.getTemplateId(); long serviceOfferingId = profileVo.getServiceOfferingId(); if (templateId == -1) { - return -1; + return null; } // create new VM into DB try { @@ -1778,7 +1778,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage if (!zone.isLocalStorageEnabled()) { if (diskOffering.isUseLocalStorage()) { - throw new InvalidParameterValueException("Zone is not configured to use local storage but disk offering " + diskOffering.getName() + " associated to the service offering " + serviceOffering.getName() + " uses it"); + throw new InvalidParameterValueException(String.format("Zone is not configured to use local storage but disk offering %s associated to the service offering %s uses it", diskOffering, serviceOffering)); } } @@ -1828,11 +1828,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage } } - if (vm != null) { - return vm.getId(); - } else { - return -1; - } + return vm; } catch (InsufficientCapacityException ex) { logger.info(ex); logger.trace(ex.getMessage(), ex); @@ -1974,10 +1970,10 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage } } - private boolean startNewVM(long vmId) { + private UserVmVO startNewVM(long vmId) { try { CallContext.current().setEventDetails("Vm Id: " + vmId); - userVmMgr.startVirtualMachine(vmId, null, new HashMap<>(), null); + return userVmMgr.startVirtualMachine(vmId, null, new HashMap<>(), null).first(); } catch (final ResourceUnavailableException ex) { logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); @@ -1998,7 +1994,6 @@ public class 
AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage logger.info(message.toString(), ex); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, message.toString()); } - return true; } private boolean assignLBruleToNewVm(long vmId, AutoScaleVmGroupVO asGroup) { @@ -2056,7 +2051,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage AutoScaleVmGroup.State oldState = asGroup.getState(); AutoScaleVmGroup.State newState = AutoScaleVmGroup.State.SCALING; if (!autoScaleVmGroupDao.updateState(groupId, oldState, newState)) { - logger.error(String.format("Can not update vmgroup state from %s to %s, groupId: %s", oldState, newState, groupId)); + logger.error("Can not update vmgroup state from {} to {}, groupId: {}", oldState, newState, asGroup); return; } try { @@ -2064,23 +2059,22 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage ActionEventUtils.onStartedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEUP, "Scaling Up AutoScale VM group " + groupId, groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), true, 0); - long vmId = createNewVM(asGroup); - if (vmId == -1) { - logger.error("Can not deploy new VM for scaling up in the group " - + asGroup.getId() + ". Waiting for next round"); + UserVm vm = createNewVM(asGroup); + if (vm == null) { + logger.error("Can not deploy new VM for scaling up in the group {}. 
Waiting for next round", asGroup); break; } // persist to DB - AutoScaleVmGroupVmMapVO groupVmMapVO = new AutoScaleVmGroupVmMapVO(asGroup.getId(), vmId); + AutoScaleVmGroupVmMapVO groupVmMapVO = new AutoScaleVmGroupVmMapVO(asGroup.getId(), vm.getId()); autoScaleVmGroupVmMapDao.persist(groupVmMapVO); // Add an Inactive-dummy record to statistics table createInactiveDummyRecord(asGroup.getId()); try { - startNewVM(vmId); + startNewVM(vm.getId()); createInactiveDummyRecord(asGroup.getId()); - if (assignLBruleToNewVm(vmId, asGroup)) { + if (assignLBruleToNewVm(vm.getId(), asGroup)) { // update last_quietTime List groupPolicyVOs = autoScaleVmGroupPolicyMapDao .listByVmGroupId(groupId); @@ -2094,25 +2088,24 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage } } ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEUP, - String.format("Started and assigned LB rule for VM %d in AutoScale VM group %d", vmId, groupId), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); + String.format("Started and assigned LB rule for VM %s in AutoScale VM group %s", vm, asGroup), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); } else { logger.error("Can not assign LB rule for this new VM"); ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventVO.LEVEL_ERROR, EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEUP, - String.format("Failed to assign LB rule for VM %d in AutoScale VM group %d", vmId, groupId), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); + String.format("Failed to assign LB rule for VM %s in AutoScale VM group %s", vm, asGroup), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); break; } } catch (ServerApiException e) { - logger.error("Can not deploy new VM for scaling up in the group " - + asGroup.getId() + ". 
Waiting for next round"); + logger.error("Can not deploy new VM for scaling up in the group {}. Waiting for next round", asGroup); ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventVO.LEVEL_ERROR, EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEUP, - String.format("Failed to start VM %d in AutoScale VM group %d", vmId, groupId), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); - destroyVm(vmId); + String.format("Failed to start VM %s in AutoScale VM group %s", vm, asGroup), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); + destroyVm(vm.getId()); break; } } } finally { if (!autoScaleVmGroupDao.updateState(groupId, newState, oldState)) { - logger.error(String.format("Can not update vmgroup state from %s back to %s, groupId: %s", newState, oldState, groupId)); + logger.error("Can not update vmgroup state from {} back to {}, group: {}", newState, oldState, asGroup); } } } @@ -2130,20 +2123,20 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage AutoScaleVmGroup.State oldState = asGroup.getState(); AutoScaleVmGroup.State newState = AutoScaleVmGroup.State.SCALING; if (!autoScaleVmGroupDao.updateState(groupId, oldState, newState)) { - logger.error(String.format("Can not update vmgroup state from %s to %s, groupId: %s", oldState, newState, groupId)); + logger.error("Can not update vmgroup state from {} to {}, groupId: {}", oldState, newState, asGroup); return; } ActionEventUtils.onStartedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEDOWN, - "Scaling down AutoScale VM group " + groupId, groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), + String.format("Scaling down AutoScale VM group %s", asGroup), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), true, 0); try { long vmId = -1; try { vmId = removeLBrule(asGroup); } catch (Exception ex) { - logger.info("Got exception when remove LB rule for a VM in AutoScale VM group 
%d: " + groupId, ex); + logger.info("Got exception when remove LB rule for a VM in AutoScale VM group: {}", asGroup, ex); ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventVO.LEVEL_ERROR, EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEDOWN, - String.format("Failed to remove LB rule for a VM in AutoScale VM group %d", groupId), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); + String.format("Failed to remove LB rule for a VM in AutoScale VM group %s", asGroup), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); throw ex; } if (vmId != -1) { @@ -2178,19 +2171,19 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage } if (destroyVm(vmId)) { ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEDOWN, - String.format("Destroyed VM %d in AutoScale VM group %d", vmId, groupId), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); + String.format("Destroyed VM %d in AutoScale VM group %s", vmId, asGroup), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); } else { ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventVO.LEVEL_ERROR, EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEDOWN, - String.format("Failed to destroy VM %d in AutoScale VM group %d", vmId, groupId), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); + String.format("Failed to destroy VM %d in AutoScale VM group %s", vmId, asGroup), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); } } else { logger.error("Can not remove LB rule for the VM being destroyed. 
Do nothing more."); ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventVO.LEVEL_ERROR, EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEDOWN, - String.format("Failed to remove LB rule for a VM in AutoScale VM group %d", groupId), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); + String.format("Failed to remove LB rule for a VM in AutoScale VM group %s", asGroup), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); } } finally { if (!autoScaleVmGroupDao.updateState(groupId, newState, oldState)) { - logger.error(String.format("Can not update vmgroup state from %s back to %s, groupId: %s", newState, oldState, groupId)); + logger.error("Can not update vmgroup state from {} back to {}, groupId: {}", newState, oldState, asGroup); } } } @@ -2328,7 +2321,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage } protected AutoScalePolicy.Action getAutoscaleAction(Map countersMap, Map countersNumberMap, AutoScaleVmGroupTO groupTO) { - logger.debug("[AutoScale] Getting autoscale action for group : " + groupTO.getId()); + logger.debug("[AutoScale] Getting autoscale action for group [id={}, uuid={}]", groupTO.getId(), groupTO.getUuid()); Network.Provider provider = getLoadBalancerServiceProvider(groupTO.getLoadBalancerId()); @@ -2367,10 +2360,12 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage } Double sum = countersMap.get(key); Integer number = countersNumberMap.get(key); - logger.debug(String.format("Checking policyId = %d, conditionId = %d, counter = \"%s\", sum = %f, number = %s", policyTO.getId(), conditionTO.getId(), counter.getName(), sum, number)); + logger.debug("Checking policy [id: {}, uuid: {}], condition [id: {}, uuid: {}], counter = \"{}\", sum = {}, number = {}", + policyTO.getId(), policyTO.getUuid(), conditionTO.getId(), conditionTO.getUuid(), counter.getName(), sum, number); if (number == null || number == 0) { bValid = false; - 
logger.debug(String.format("Skipping policyId = %d, conditionId = %d, counter = \"%s\" because the number is %s", policyTO.getId(), conditionTO.getId(), counter.getName(), number)); + logger.debug("Skipping policy [id: {}, uuid: {}], condition [id: {}, uuid: {}], counter = \"{}\" because the number is {}", + policyTO.getId(), policyTO.getUuid(), conditionTO.getId(), conditionTO.getUuid(), counter.getName(), number); break; } Double avg = sum / number; @@ -2381,9 +2376,9 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage || ((op == com.cloud.network.as.Condition.Operator.LE) && (avg.doubleValue() <= thresholdPercent.doubleValue())) || ((op == com.cloud.network.as.Condition.Operator.LT) && (avg.doubleValue() < thresholdPercent.doubleValue())); - logger.debug(String.format("Check result on policyId = %d, conditionId = %d, counter = %s is : %s" + - " (actual result = %f, operator = %s, threshold = %f)", - policyTO.getId(), conditionTO.getId(), counter.getSource(), bConditionCheck, avg, op, thresholdPercent)); + logger.debug("Check result on policy [id: {}, uuid: {}], condition [id: {}, uuid: {}], counter = {} is : {}" + + " (actual result = {}, operator = {}, threshold = {})", + policyTO.getId(), policyTO.getUuid(), conditionTO.getId(), conditionTO.getUuid(), counter.getSource(), bConditionCheck, avg, op, thresholdPercent); if (!bConditionCheck) { bValid = false; @@ -2391,7 +2386,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage } } AutoScalePolicy.Action action = bValid ? 
policyTO.getAction() : null; - logger.debug(String.format("Check result on policyId = %d is %s", policyTO.getId(), action)); + logger.debug("Check result on policy [id: {}, uuid: {}] is {}", policyTO.getId(), policyTO.getUuid(), action); return action; } @@ -2423,7 +2418,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage } Network network = networkDao.findById(loadBalancer.getNetworkId()); if (network == null) { - throw new CloudRuntimeException(String.format("Unable to find network with id: %s ", loadBalancer.getNetworkId())); + throw new CloudRuntimeException(String.format("Unable to find network with id: %s for load balancer: %s", loadBalancer.getNetworkId(), loadBalancer)); } return network; } @@ -2435,7 +2430,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage } IPAddressVO ipAddress = ipAddressDao.findById(loadBalancer.getSourceIpAddressId()); if (ipAddress == null) { - throw new CloudRuntimeException(String.format("Unable to find IP Address with id: %s ", loadBalancer.getSourceIpAddressId())); + throw new CloudRuntimeException(String.format("Unable to find IP Address with id: %s for load balancer: %s", loadBalancer.getSourceIpAddressId(), loadBalancer)); } return new Pair<>(ipAddress.getAddress().addr(), loadBalancer.getSourcePortStart()); } @@ -2528,7 +2523,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage AutoScalePolicy.Action scaleAction = getAutoscaleAction(countersMap, countersNumberMap, groupTO); if (scaleAction != null) { - logger.debug("[AutoScale] Doing scale action: " + scaleAction + " for group " + asGroup.getId()); + logger.debug("[AutoScale] Doing scale action: {} for group {}", scaleAction, asGroup); if (AutoScalePolicy.Action.SCALEUP.equals(scaleAction)) { doScaleUp(asGroup.getId(), 1); } else { @@ -2574,13 +2569,14 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage Long counterId = 
Long.parseLong(counterVm[1]); - Long conditionId = Long.parseLong(params.get("con" + counterVm[0])); + ConditionTO condition = new ConditionTO(Long.parseLong(params.get("con" + counterVm[0])), null, 0L, null, null); - Long policyId = 0L; // For NetScaler, the policyId is not returned in PerformanceMonitorAnswer + // For NetScaler, the policyId is not returned in PerformanceMonitorAnswer + AutoScalePolicyTO policy = new AutoScalePolicyTO(0L, null, 0, 0, null, null, null, false); Double coVal = Double.parseDouble(counterVals[1]); - updateCountersMapWithInstantData(countersMap, countersNumberMap, groupTO, counterId, conditionId, policyId, coVal, AutoScaleValueType.INSTANT_VM); + updateCountersMapWithInstantData(countersMap, countersNumberMap, groupTO, counterId, condition, policy, coVal, AutoScaleValueType.INSTANT_VM); } catch (Exception e) { logger.error("Cannot process PerformanceMonitorAnswer due to Exception: ", e); @@ -2590,9 +2586,9 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage } protected void updateCountersMapWithInstantData(Map countersMap, Map countersNumberMap, AutoScaleVmGroupTO groupTO, - Long counterId, Long conditionId, Long policyId, Double coVal, AutoScaleValueType valueType) { + Long counterId, ConditionTO condition, AutoScalePolicyTO policy, Double coVal, AutoScaleValueType valueType) { // Summary of all counter by counterId key - String key = generateKeyFromPolicyAndConditionAndCounter(policyId, conditionId, counterId); + String key = generateKeyFromPolicyAndConditionAndCounter(policy.getId(), condition.getId(), counterId); CounterVO counter = counterDao.findById(counterId); if (counter == null) { @@ -2614,7 +2610,9 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage if (AutoScaleValueType.INSTANT_VM_GROUP.equals(valueType)) { Integer currentVM = autoScaleVmGroupVmMapDao.countAvailableVmsByGroup(groupTO.getId()); if (currentVM == 0) { - logger.debug(String.format("Skipping 
updating countersMap for group %s and policy %s and counter %s due to no VMs", groupTO.getId(), policyId, counterId)); + logger.debug("Skipping updating countersMap for group [id={}, uuid={}] " + + "and policy [id={}, uuid={}] and counter [id={}, uuid={}] due to no VMs", + groupTO.getId(), groupTO.getUuid(), policy.getId(), policy.getUuid(), counter.getId(), counter.getUuid()); return; } coVal = coVal / currentVM; @@ -2682,7 +2680,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage // get scale action AutoScalePolicy.Action scaleAction = getAutoscaleAction(countersMap, countersNumberMap, groupTO); if (scaleAction != null) { - logger.debug("[AutoScale] Doing scale action: " + scaleAction + " for group " + asGroup.getId()); + logger.debug("[AutoScale] Doing scale action: {} for group {}", scaleAction, asGroup); if (AutoScalePolicy.Action.SCALEUP.equals(scaleAction)) { doScaleUp(asGroup.getId(), 1); } else { @@ -2722,7 +2720,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage logger.warn("Got empty result for virtual machine statistics from host: " + host); } } catch (Exception e) { - logger.debug("Failed to get VM stats from host : " + host.getName()); + logger.debug("Failed to get VM stats from host : {}", host); } return vmStatsById; } @@ -2779,7 +2777,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage command.setWait(30); GetAutoScaleMetricsAnswer answer = (GetAutoScaleMetricsAnswer) agentMgr.easySend(router.getHostId(), command); if (answer == null || !answer.getResult()) { - logger.error("Failed to get autoscale metrics from virtual router " + router.getName()); + logger.error("Failed to get autoscale metrics from virtual router {}", router); processGetAutoScaleMetricsAnswer(groupTO, new ArrayList<>(), router.getId()); } else { processGetAutoScaleMetricsAnswer(groupTO, answer.getValues(), router.getId()); @@ -2838,24 +2836,26 @@ public class 
AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage } protected boolean updateCountersMap(AutoScaleVmGroupTO groupTO, Map countersMap, Map countersNumberMap) { - logger.debug("Updating countersMap for as group: " + groupTO.getId()); + logger.debug("Updating countersMap for as group [id={}, uuid={}]", groupTO.getId(), groupTO.getUuid()); for (AutoScalePolicyTO policyTO : groupTO.getPolicies()) { Date afterDate = new Date(System.currentTimeMillis() - ((long)policyTO.getDuration() << 10)); List dummyStats = asGroupStatisticsDao.listDummyRecordsByVmGroup(groupTO.getId(), afterDate); if (CollectionUtils.isNotEmpty(dummyStats)) { - logger.error(String.format("Failed to update counters map as there are %d dummy statistics in as group %d", dummyStats.size(), groupTO.getId())); + logger.error("Failed to update counters map as there are {} dummy statistics in as group {}", dummyStats.size(), groupTO.getId()); return false; } List inactiveStats = asGroupStatisticsDao.listInactiveByVmGroupAndPolicy(groupTO.getId(), policyTO.getId(), afterDate); if (CollectionUtils.isNotEmpty(inactiveStats)) { - logger.error(String.format("Failed to update counters map as there are %d Inactive statistics in as group %d and policy %s", inactiveStats.size(), groupTO.getId(), policyTO.getId())); + logger.error("Failed to update counters map as there are {} Inactive " + + "statistics in as group [id={}, uuid={}] and policy [id={}, uuid={}]", + inactiveStats.size(), groupTO.getId(), groupTO.getUuid(), policyTO.getId(), policyTO.getUuid()); continue; } for (ConditionTO conditionTO : policyTO.getConditions()) { updateCountersMapPerCondition(groupTO, policyTO, conditionTO, afterDate, countersMap, countersNumberMap); } } - logger.debug("DONE Updating countersMap for as group: " + groupTO.getId()); + logger.debug("DONE Updating countersMap for as group [id={}, uuid={}]", groupTO.getId(), groupTO.getUuid()); return true; } @@ -2865,15 +2865,17 @@ public class AutoScaleManagerImpl extends 
ManagerBase implements AutoScaleManage CounterTO counter = conditionTO.getCounter(); List stats = asGroupStatisticsDao.listByVmGroupAndPolicyAndCounter(groupTO.getId(), policyTO.getId(), counter.getId(), afterDate); if (CollectionUtils.isEmpty(stats)) { - logger.debug(String.format("Skipping updating countersMap for group %s and policy %s and counter %s due to no stats", groupTO.getId(), policyTO.getId(), counter.getId())); + logger.debug("Skipping updating countersMap for group [id={}, uuid={}] and policy [id={}, uuid={}] and " + "counter {} due to no stats", groupTO.getId(), groupTO.getUuid(), policyTO.getId(), policyTO.getUuid(), counter.getId()); return; } - logger.debug(String.format("Updating countersMap with %d stats for group %s and policy %s and counter %s", stats.size(), groupTO.getId(), policyTO.getId(), counter.getId())); + logger.debug("Updating countersMap with {} stats for group [id={}, uuid={}] and policy [id={}, uuid={}] and " + "counter {}", stats.size(), groupTO.getId(), groupTO.getUuid(), policyTO.getId(), policyTO.getUuid(), counter.getId()); Map> aggregatedRecords = new HashMap<>(); List incorrectRecords = new ArrayList<>(); for (AutoScaleVmGroupStatisticsVO stat : stats) { if (Arrays.asList(AutoScaleValueType.INSTANT_VM, AutoScaleValueType.INSTANT_VM_GROUP).contains(stat.getValueType())) { - updateCountersMapWithInstantData(countersMap, countersNumberMap, groupTO, counter.getId(), conditionId, policyTO.getId(), stat.getRawValue(), stat.getValueType()); + updateCountersMapWithInstantData(countersMap, countersNumberMap, groupTO, counter.getId(), conditionTO, policyTO, stat.getRawValue(), stat.getValueType()); } else if (Arrays.asList(AutoScaleValueType.AGGREGATED_VM, AutoScaleValueType.AGGREGATED_VM_GROUP).contains(stat.getValueType())) { String key = stat.getCounterId() + "-" + stat.getResourceId(); if (incorrectRecords.contains(key)) { @@ -2899,12 +2901,12 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage } } - updateCountersMapByAggregatedRecords(countersMap, countersNumberMap, aggregatedRecords, conditionId, policyTO.getId(), 
groupTO.getId()); + updateCountersMapByAggregatedRecords(countersMap, countersNumberMap, aggregatedRecords, conditionTO, policyTO, groupTO); } public void updateCountersMapByAggregatedRecords(Map countersMap, Map countersNumberMap, Map> aggregatedRecords, - Long conditionId, Long policyId, Long groupId) { + ConditionTO condition, AutoScalePolicyTO policy, AutoScaleVmGroupTO group) { if (MapUtils.isNotEmpty(aggregatedRecords)) { logger.debug("Processing aggregated data"); for (Map.Entry> aggregatedRecord : aggregatedRecords.entrySet()) { @@ -2912,21 +2914,24 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage Long counterId = Long.valueOf(recordKey.split("-")[0]); List records = aggregatedRecord.getValue(); if (records.size() <= 1) { - logger.info(String.format("Ignoring aggregated records, conditionId = %s, counterId = %s", conditionId, counterId)); + logger.info(String.format("Ignoring aggregated records, condition [id=%d, uuid=%s], counterId = %s", + condition.getId(), condition.getUuid(), counterId)); continue; } AutoScaleVmGroupStatisticsVO firstRecord = records.get(0); AutoScaleVmGroupStatisticsVO lastRecord = records.get(records.size() - 1); Double coVal = (lastRecord.getRawValue() - firstRecord.getRawValue()) * 1000 / (lastRecord.getCreated().getTime() - firstRecord.getCreated().getTime()); if (AutoScaleValueType.AGGREGATED_VM_GROUP.equals(firstRecord.getValueType())) { - Integer currentVM = autoScaleVmGroupVmMapDao.countAvailableVmsByGroup(groupId); + Integer currentVM = autoScaleVmGroupVmMapDao.countAvailableVmsByGroup(group.getId()); if (currentVM == 0) { - logger.debug(String.format("Skipping updating countersMap for group %s and policy %s and counter %s due to no VMs", groupId, policyId, counterId)); + logger.debug("Skipping updating countersMap for group [id={}," + + " uuid={}] and policy [id={}, uuid={}] and counter {} due to no VMs", + group.getId(), group.getUuid(), policy.getId(), policy.getUuid(), counterId); 
return; } coVal = coVal / currentVM; } - String key = generateKeyFromPolicyAndConditionAndCounter(policyId, conditionId, counterId); + String key = generateKeyFromPolicyAndConditionAndCounter(policy.getId(), condition.getId(), counterId); updateCountersMapWithProcessedData(countersMap, countersNumberMap, key, coVal); } } @@ -2943,14 +2948,15 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage Integer duration = policyTO.getDuration(); Integer delaySecs = cleanupDelay >= duration ? cleanupDelay : duration; Date beforeDate = new Date(System.currentTimeMillis() - ((long)delaySecs * 1000)); - logger.debug(String.format("Removing stats for policy %d in as group %d, before %s", policyTO.getId(), groupTO.getId(), beforeDate)); + logger.debug("Removing stats for policy [id={}, uuid={}] in as group [id={}, uuid={}], before {}", + policyTO.getId(), policyTO.getUuid(), groupTO.getId(), groupTO.getUuid(), beforeDate); asGroupStatisticsDao.removeByGroupAndPolicy(groupTO.getId(), policyTO.getId(), beforeDate); if (delaySecs > maxDelaySecs) { maxDelaySecs = delaySecs; } } Date beforeDate = new Date(System.currentTimeMillis() - ((long)maxDelaySecs * 1000)); - logger.debug(String.format("Removing stats for other policies in as group %d, before %s", groupTO.getId(), beforeDate)); + logger.debug(String.format("Removing stats for other policies in as group [id=%d, uuid=%s], before %s", groupTO.getId(), groupTO.getUuid(), beforeDate)); asGroupStatisticsDao.removeByGroupId(groupTO.getId(), beforeDate); } @@ -3034,15 +3040,15 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage } protected boolean destroyVm(Long vmId) { + UserVmVO vm = userVmDao.findById(vmId); try { - UserVmVO vm = userVmDao.findById(vmId); if (vm != null) { userVmMgr.destroyVm(vmId, true); userVmMgr.expunge(vm); } return true; } catch (Exception ex) { - logger.error("Cannot destroy vm with id: " + vmId + "due to Exception: ", ex); + logger.error("Cannot 
destroy vm {} with id: {} due to Exception: ", vm, vmId, ex); return false; } } diff --git a/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java b/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java index 5d468d95e4c..6ef84a04b9f 100644 --- a/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java +++ b/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java @@ -25,6 +25,7 @@ import java.util.Set; import javax.inject.Inject; +import com.cloud.host.Host; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ -341,13 +342,13 @@ public class ConfigDriveNetworkElement extends AdapterBase implements NetworkEle @Override public boolean prepareMigration(NicProfile nic, Network network, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) { if (_networkModel.getUserDataUpdateProvider(network).getProvider().equals(Provider.ConfigDrive)) { - logger.trace(String.format("[prepareMigration] for vm: %s", vm.getInstanceName())); + logger.trace(String.format("[prepareMigration] for vm: %s", vm)); try { if (isConfigDriveIsoOnHostCache(vm.getId())) { vm.setConfigDriveLocation(Location.HOST); if (configureConfigDriveData(vm, nic, dest)) { // Create the config drive on dest host cache - createConfigDriveIsoOnHostCache(vm, dest.getHost().getId()); + createConfigDriveIsoOnHostCache(nic, vm, dest.getHost()); } } else { vm.setConfigDriveLocation(getConfigDriveLocation(vm.getId())); @@ -548,13 +549,13 @@ public class ConfigDriveNetworkElement extends AdapterBase implements NetworkEle return false; } - private boolean createConfigDriveIsoOnHostCache(VirtualMachineProfile profile, Long hostId) throws ResourceUnavailableException { - if (hostId == null) { + private boolean 
createConfigDriveIsoOnHostCache(NicProfile nic, VirtualMachineProfile profile, Host host) throws ResourceUnavailableException { + if (host == null) { throw new ResourceUnavailableException("Config drive iso creation failed, dest host not available", ConfigDriveNetworkElement.class, 0L); } - logger.debug("Creating config drive ISO for vm: " + profile.getInstanceName() + " on host: " + hostId); + logger.debug("Creating config drive ISO for vm: {} on host: {}", profile, host); Map customUserdataParamMap = getVMCustomUserdataParamMap(profile.getId()); @@ -565,9 +566,9 @@ public class ConfigDriveNetworkElement extends AdapterBase implements NetworkEle final String isoData = ConfigDriveBuilder.buildConfigDrive(nicProfiles, profile.getVmData(), isoFileName, profile.getConfigDriveLabel(), customUserdataParamMap, supportedServices); final HandleConfigDriveIsoCommand configDriveIsoCommand = new HandleConfigDriveIsoCommand(isoPath, isoData, null, false, true, true); - final HandleConfigDriveIsoAnswer answer = (HandleConfigDriveIsoAnswer) agentManager.easySend(hostId, configDriveIsoCommand); + final HandleConfigDriveIsoAnswer answer = (HandleConfigDriveIsoAnswer) agentManager.easySend(host.getId(), configDriveIsoCommand); if (answer == null) { - throw new CloudRuntimeException("Unable to get an answer to handle config drive creation for vm: " + profile.getInstanceName() + " on host: " + hostId); + throw new CloudRuntimeException(String.format("Unable to get an answer to handle config drive creation for vm: %s on host: %s", profile, host)); } if (!answer.getResult()) { @@ -587,26 +588,26 @@ public class ConfigDriveNetworkElement extends AdapterBase implements NetworkEle ConfigDriveNetworkElement.class, 0L); } - logger.debug("Deleting config drive ISO for vm: " + vm.getInstanceName() + " on host: " + hostId); final String isoPath = ConfigDrive.createConfigDrivePath(vm.getInstanceName()); final HandleConfigDriveIsoCommand configDriveIsoCommand = new 
HandleConfigDriveIsoCommand(isoPath, null, null, false, true, false); HostVO hostVO = _hostDao.findById(hostId); + logger.debug("Deleting config drive ISO for vm: {} on host: {}({})", vm, hostId, hostVO); if (hostVO == null) { logger.warn(String.format("Host %s appears to be unavailable, skipping deletion of config-drive ISO on host cache", hostId)); return false; } if (!Arrays.asList(Status.Up, Status.Connecting).contains(hostVO.getStatus())) { - logger.warn(String.format("Host status %s is not Up or Connecting, skipping deletion of config-drive ISO on host cache", hostId)); + logger.warn("Host status {} is not Up or Connecting, skipping deletion of config-drive ISO on host cache", hostVO); return false; } final HandleConfigDriveIsoAnswer answer = (HandleConfigDriveIsoAnswer) agentManager.easySend(hostId, configDriveIsoCommand); if (answer == null) { - throw new CloudRuntimeException("Unable to get an answer to handle config drive deletion for vm: " + vm.getInstanceName() + " on host: " + hostId); + throw new CloudRuntimeException(String.format("Unable to get an answer to handle config drive deletion for vm: %s on host: %s", vm, hostVO)); } if (!answer.getResult()) { - logger.error("Failed to remove config drive for instance: " + vm.getInstanceName()); + logger.error("Failed to remove config drive for instance: {}", vm); return false; } return true; @@ -641,7 +642,7 @@ public class ConfigDriveNetworkElement extends AdapterBase implements NetworkEle ConfigDriveNetworkElement.class, 0L); } - logger.debug("Creating config drive ISO for vm: " + profile.getInstanceName()); + logger.debug("Creating config drive ISO for vm: {}", profile); Map customUserdataParamMap = getVMCustomUserdataParamMap(profile.getId()); @@ -721,7 +722,7 @@ public class ConfigDriveNetworkElement extends AdapterBase implements NetworkEle Long hostId = (vm.getHostId() != null) ? 
vm.getHostId() : vm.getLastHostId(); Location location = getConfigDriveLocation(vm.getId()); if (hostId == null) { - logger.info(String.format("The VM was never booted; no config-drive ISO created for VM %s", vm.getName())); + logger.info("The VM was never booted; no config-drive ISO created for VM {}", vm); return true; } if (location == Location.HOST) { @@ -749,14 +750,14 @@ public class ConfigDriveNetworkElement extends AdapterBase implements NetworkEle ConfigDriveNetworkElement.class, 0L); } - logger.debug("Deleting config drive ISO for vm: " + vm.getInstanceName()); + logger.debug("Deleting config drive ISO for vm: {}", vm); final String isoPath = ConfigDrive.createConfigDrivePath(vm.getInstanceName()); final HandleConfigDriveIsoCommand configDriveIsoCommand = new HandleConfigDriveIsoCommand(isoPath, null, dataStore.getTO(), false, false, false); final HandleConfigDriveIsoAnswer answer = (HandleConfigDriveIsoAnswer) agentManager.easySend(agentId, configDriveIsoCommand); if (!answer.getResult()) { - logger.error("Failed to remove config drive for instance: " + vm.getInstanceName()); + logger.error("Failed to remove config drive for instance: {}", vm); return false; } return true; diff --git a/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java b/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java index 53cf838ca87..d4aeae23a2d 100644 --- a/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java +++ b/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java @@ -281,7 +281,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServ if (canHandle(network, Service.Firewall)) { final List routers = getRouters(network); if (routers == null || routers.isEmpty()) { - logger.debug("Virtual router elemnt doesn't need to apply firewall rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router element doesn't 
need to apply firewall rules on the backend; virtual router doesn't exist in the network {}", network); return true; } @@ -328,7 +328,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServ final List routers = getRouters(network); if (routers == null || routers.isEmpty()) { - logger.debug("Virtual router elemnt doesn't need to apply lb rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router element doesn't need to apply lb rules on the backend; virtual router doesn't exist in the network {}", network); return true; } @@ -352,7 +352,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServ if (canHandle(network, Service.Vpn)) { final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug("Virtual router elemnt doesn't need to apply vpn users on the backend; virtual router" + " doesn't exist in the network " + network.getId()); + logger.debug("Virtual router element doesn't need to apply vpn users on the backend; virtual router doesn't exist in the network {}", network); return null; } @@ -376,7 +376,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServ if (canHandle(network, Service.Vpn)) { final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug("Virtual router elemnt doesn't need stop vpn on the backend; virtual router doesn't" + " exist in the network " + network.getId()); + logger.debug("Virtual router element doesn't need stop vpn on the backend; virtual router doesn't exist in the network {}", network); return true; } return _routerMgr.startRemoteAccessVpn(network, vpn, routers); @@ -396,8 +396,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServ if (canHandle(network, Service.Vpn)) { final 
List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug(String.format("There is no virtual router in network [uuid: %s, name: %s], it is not necessary to stop the VPN on backend.", - network.getUuid(), network.getName())); + logger.debug("There is no virtual router in network {}, it is not necessary to stop the VPN on backend.", network); return true; } return _routerMgr.deleteRemoteAccessVpn(network, vpn, routers); @@ -420,7 +419,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServ if (canHandle) { final List routers = getRouters(network); if (routers == null || routers.isEmpty()) { - logger.debug("Virtual router elemnt doesn't need to associate ip addresses on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router element doesn't need to associate ip addresses on the backend; virtual router doesn't exist in the network {}", network); return true; } @@ -592,7 +591,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServ if (canHandle(network, Service.StaticNat)) { final List routers = getRouters(network); if (routers == null || routers.isEmpty()) { - logger.debug("Virtual router elemnt doesn't need to apply static nat on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router element doesn't need to apply static nat on the backend; virtual router doesn't exist in the network {}", network); return true; } @@ -694,7 +693,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServ } final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug("Can't find virtual router element in network " + network.getId()); + logger.debug("Can't find virtual router element in network {}", network); 
return true; } @@ -752,7 +751,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServ } final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug("Can't find virtual router element in network " + network.getId()); + logger.debug("Can't find virtual router element in network {}", network); return true; } @@ -802,7 +801,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServ } final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug("Can't find virtual router element in network " + network.getId()); + logger.debug("Can't find virtual router element in network {}", network); return true; } @@ -876,7 +875,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServ if (canHandle(network, Service.PortForwarding)) { final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug("Virtual router elemnt doesn't need to apply firewall rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router element doesn't need to apply firewall rules on the backend; virtual router doesn't exist in the network {}", network); return true; } @@ -1245,7 +1244,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServ logger.debug("Successfully saved user data to router"); } } else { - logger.debug("Not applying userdata for nic id=" + nic.getId() + " in vm id=" + vm.getId() + " because it is not supported in network id=" + network.getId()); + logger.debug("Not applying userdata for nic {} in vm {} because it is not supported in network {}", nic, vm, network); } } diff --git 
a/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java b/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java index 841f6221182..3d613fca18e 100644 --- a/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java +++ b/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java @@ -60,6 +60,7 @@ import com.cloud.network.vpc.StaticRouteProfile; import com.cloud.network.vpc.Vpc; import com.cloud.network.vpc.VpcGateway; import com.cloud.network.vpc.VpcManager; +import com.cloud.network.vpc.dao.NetworkACLDao; import com.cloud.network.vpc.dao.VpcDao; import com.cloud.network.vpc.dao.VpcGatewayDao; import com.cloud.offering.NetworkOffering; @@ -99,6 +100,8 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc @Inject VpcGatewayDao _vpcGatewayDao; @Inject + NetworkACLDao _networkACLDao; + @Inject NetworkACLItemDao _networkACLItemDao; @Inject EntityManager _entityMgr; @@ -435,7 +438,9 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc final List routers = _vpcRouterMgr.getVpcRouters(gateway.getVpcId()); if (routers == null || routers.isEmpty()) { - logger.debug(getName() + " element doesn't need to create Private gateway on the backend; VPC virtual " + "router doesn't exist in the vpc id=" + gateway.getVpcId()); + logger.debug("{} element doesn't need to create Private gateway on the backend; VPC" + + " virtual router doesn't exist in the vpc id {} ({})", + this::getName, gateway::getVpcId, () -> _vpcDao.findById(gateway.getVpcId())); return true; } @@ -454,7 +459,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc final List rules = _networkACLItemDao.listByACL(gateway.getNetworkACLId()); result = result && networkTopology.applyNetworkACLs(network, rules, domainRouterVO, isPrivateGateway); } catch (final Exception ex) { - logger.debug("Failed to apply network acl id " + gateway.getNetworkACLId() + " 
on gateway "); + logger.debug("Failed to apply network acl {} on gateway ", () -> _networkACLDao.findById(gateway.getNetworkACLId())); return false; } } @@ -472,7 +477,9 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc final List routers = _vpcRouterMgr.getVpcRouters(gateway.getVpcId()); if (routers == null || routers.isEmpty()) { - logger.debug(getName() + " element doesn't need to delete Private gateway on the backend; VPC virtual " + "router doesn't exist in the vpc id=" + gateway.getVpcId()); + logger.debug("{} element doesn't need to delete Private gateway on the backend; VPC " + + "virtual router doesn't exist in the vpc id {} ({})", + this::getName, gateway::getVpcId, () -> _vpcDao.findById(gateway.getVpcId())); return true; } @@ -501,8 +508,8 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc if (canHandle) { final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug(getName() + " element doesn't need to associate ip addresses on the backend; VPC virtual " + "router doesn't exist in the network " - + network.getId()); + logger.debug("{} element doesn't need to associate ip addresses on the backend; " + + "VPC virtual router doesn't exist in the network {}", getName(), network); return false; } @@ -522,7 +529,8 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc if (canHandle(network, Service.NetworkACL)) { final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug("Virtual router elemnt doesn't need to apply firewall rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router element doesn't need to apply firewall rules on the " + + "backend; virtual router doesn't exist in the network {}", network); return true; } 
@@ -533,7 +541,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc try { result = result && networkTopology.applyNetworkACLs(network, rules, domainRouterVO, false); } catch (final Exception ex) { - logger.debug("Failed to apply network acl in network " + network.getId()); + logger.debug("Failed to apply network acl in network {}", network); } } } @@ -576,7 +584,8 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc final List routers = _vpcRouterMgr.getVpcRouters(gateway.getVpcId()); if (routers == null || routers.isEmpty()) { - logger.debug("Virtual router element doesn't need to apply network acl rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router element doesn't need to apply network acl rules on the " + + "backend; virtual router doesn't exist in the network {}", network); return true; } @@ -612,12 +621,12 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc final Vpc vpc = _entityMgr.findById(Vpc.class, vpcId); if (!_ntwkModel.isProviderEnabledInZone(vpc.getZoneId(), Provider.VPCVirtualRouter.getName())) { - throw new ResourceUnavailableException("VPC provider is not enabled in zone " + vpc.getZoneId(), DataCenter.class, vpc.getZoneId()); + throw new ResourceUnavailableException(String.format("VPC provider is not enabled in zone %s", _dcDao.findById(vpc.getZoneId())), DataCenter.class, vpc.getZoneId()); } final List routers = _vpcRouterMgr.getVpcRouters(ip.getVpcId()); if (routers == null) { - throw new ResourceUnavailableException("Cannot enable site-to-site VPN on the backend; virtual router doesn't exist in the vpc " + ip.getVpcId(), DataCenter.class, + throw new ResourceUnavailableException(String.format("Cannot enable site-to-site VPN on the backend; virtual router doesn't exist in the vpc %s", vpc), DataCenter.class, vpc.getZoneId()); } @@ -643,12 +652,12 @@ public class 
VpcVirtualRouterElement extends VirtualRouterElement implements Vpc final Vpc vpc = _entityMgr.findById(Vpc.class, vpcId); if (!_ntwkModel.isProviderEnabledInZone(vpc.getZoneId(), Provider.VPCVirtualRouter.getName())) { - throw new ResourceUnavailableException("VPC provider is not enabled in zone " + vpc.getZoneId(), DataCenter.class, vpc.getZoneId()); + throw new ResourceUnavailableException(String.format("VPC provider is not enabled in zone %s", _dcDao.findById(vpc.getZoneId())), DataCenter.class, vpc.getZoneId()); } final List routers = _vpcRouterMgr.getVpcRouters(ip.getVpcId()); if (routers == null) { - throw new ResourceUnavailableException("Cannot enable site-to-site VPN on the backend; virtual router doesn't exist in the vpc " + ip.getVpcId(), DataCenter.class, + throw new ResourceUnavailableException(String.format("Cannot enable site-to-site VPN on the backend; virtual router doesn't exist in the vpc %s", vpc), DataCenter.class, vpc.getZoneId()); } @@ -669,7 +678,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc final List routers = _vpcRouterMgr.getVpcRouters(vpcId); if (routers == null) { - logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network " + vpcId); + logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network {}", () -> _vpcDao.findById(vpcId)); return null; } @@ -698,7 +707,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc final List routers = _vpcRouterMgr.getVpcRouters(vpn.getVpcId()); if (routers == null) { - logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network " + vpn.getVpcId()); + logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network {}", () -> _vpcDao.findById(vpn.getVpcId())); return false; } @@ -717,7 +726,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc final List 
routers = _vpcRouterMgr.getVpcRouters(vpn.getVpcId()); if (routers == null) { - logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network " + vpn.getVpcId()); + logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network {}", () -> _vpcDao.findById(vpn.getVpcId())); return false; } diff --git a/server/src/main/java/com/cloud/network/firewall/FirewallManagerImpl.java b/server/src/main/java/com/cloud/network/firewall/FirewallManagerImpl.java index 6b98fc00c59..6b8133534d2 100644 --- a/server/src/main/java/com/cloud/network/firewall/FirewallManagerImpl.java +++ b/server/src/main/java/com/cloud/network/firewall/FirewallManagerImpl.java @@ -451,7 +451,7 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, if (newRule.getProtocol().equalsIgnoreCase(NetUtils.ICMP_PROTO) && newRule.getProtocol().equalsIgnoreCase(rule.getProtocol())) { if (newRule.getIcmpCode().longValue() == rule.getIcmpCode().longValue() && newRule.getIcmpType().longValue() == rule.getIcmpType().longValue() && newRule.getProtocol().equalsIgnoreCase(rule.getProtocol()) && duplicatedCidrs) { - throw new InvalidParameterValueException("New rule conflicts with existing rule id=" + rule.getId()); + throw new InvalidParameterValueException(String.format("New rule conflicts with existing rule: %s", rule)); } } @@ -483,8 +483,7 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, (rule.getPurpose() == Purpose.LoadBalancing && newRule.getPurpose() == Purpose.Vpn && !newRule.getProtocol().equalsIgnoreCase(rule.getProtocol())); if (!(allowPf || allowStaticNat || oneOfRulesIsFirewall || allowVpnPf || allowVpnLb)) { - throw new NetworkRuleConflictException("The range specified, " + newRule.getSourcePortStart() + "-" + newRule.getSourcePortEnd() + - ", conflicts with rule " + rule.getId() + " which has " + rule.getSourcePortStart() + "-" + rule.getSourcePortEnd()); + throw new 
NetworkRuleConflictException(String.format("The range specified, %d-%d, conflicts with rule %s which has %d-%d", newRule.getSourcePortStart(), newRule.getSourcePortEnd(), rule, rule.getSourcePortStart(), rule.getSourcePortEnd())); } } } @@ -609,7 +608,7 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, } else if (proto.equalsIgnoreCase(NetUtils.ICMP_PROTO) && purpose != Purpose.Firewall) { throw new InvalidParameterValueException("Protocol " + proto + " is currently supported only for rules with purpose " + Purpose.Firewall); } else if (purpose == Purpose.Firewall && !supportedTrafficTypes.contains(trafficType.toString().toLowerCase())) { - throw new InvalidParameterValueException("Traffic Type " + trafficType + " is currently supported by Firewall in network " + networkId); + throw new InvalidParameterValueException(String.format("Traffic Type %s is currently supported by Firewall in network %s", trafficType, network)); } } @@ -639,15 +638,13 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, if (rule.getState() == FirewallRule.State.Revoke) { FirewallRuleVO relatedRule = _firewallDao.findByRelatedId(rule.getId()); if (relatedRule != null) { - logger.warn("Can't remove the firewall rule id=" + rule.getId() + " as it has related firewall rule id=" + relatedRule.getId() + - "; leaving it in Revoke state"); + logger.warn(String.format("Can't remove the firewall rule [%s] as it has related firewall rule [%s]; leaving it in Revoke state", rule, relatedRule)); success = false; } else { removeRule(rule); if (rule.getSourceIpAddressId() != null) { //if the rule is the last one for the ip address assigned to VPC, unassign it from the network - IpAddress ip = _ipAddressDao.findById(rule.getSourceIpAddressId()); - _vpcMgr.unassignIPFromVpcNetwork(ip.getId(), rule.getNetworkId()); + _vpcMgr.unassignIPFromVpcNetwork(rule.getSourceIpAddressId(), rule.getNetworkId()); } } } else if (rule.getState() == 
FirewallRule.State.Add) { @@ -974,12 +971,12 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, @Override @ActionEvent(eventType = EventTypes.EVENT_FIREWALL_CLOSE, eventDescription = "revoking firewall rule", async = true) - public boolean revokeFirewallRulesForIp(long ipId, long userId, Account caller) throws ResourceUnavailableException { + public boolean revokeFirewallRulesForIp(IpAddress ip, long userId, Account caller) throws ResourceUnavailableException { List rules = new ArrayList(); - List fwRules = _firewallDao.listByIpAndPurposeAndNotRevoked(ipId, Purpose.Firewall); + List fwRules = _firewallDao.listByIpAndPurposeAndNotRevoked(ip.getId(), Purpose.Firewall); if (logger.isDebugEnabled()) { - logger.debug("Releasing " + fwRules.size() + " firewall rules for ip id=" + ipId); + logger.debug("Releasing {} firewall rules for ip {}", fwRules.size(), ip); } for (FirewallRuleVO rule : fwRules) { @@ -989,7 +986,7 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, } // now send everything to the backend - List rulesToApply = _firewallDao.listByIpAndPurpose(ipId, Purpose.Firewall); + List rulesToApply = _firewallDao.listByIpAndPurpose(ip.getId(), Purpose.Firewall); //apply rules if (!applyFirewallRules(rulesToApply, rulesContinueOnErrFlag, caller)) { if (!rulesContinueOnErrFlag) { @@ -997,10 +994,10 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, } } // Now we check again in case more rules have been inserted. 
- rules.addAll(_firewallDao.listByIpAndPurposeAndNotRevoked(ipId, Purpose.Firewall)); + rules.addAll(_firewallDao.listByIpAndPurposeAndNotRevoked(ip.getId(), Purpose.Firewall)); if (logger.isDebugEnabled()) { - logger.debug("Successfully released firewall rules for ip id=" + ipId + " and # of rules now = " + rules.size()); + logger.debug("Successfully released firewall rules for ip {} and # of rules now = {}", ip, rules.size()); } return rules.size() == 0; @@ -1025,12 +1022,12 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, @Override @ActionEvent(eventType = EventTypes.EVENT_FIREWALL_CLOSE, eventDescription = "revoking firewall rule", async = true) - public boolean revokeAllFirewallRulesForNetwork(long networkId, long userId, Account caller) throws ResourceUnavailableException { + public boolean revokeAllFirewallRulesForNetwork(Network network, long userId, Account caller) throws ResourceUnavailableException { List rules = new ArrayList(); - List fwRules = _firewallDao.listByNetworkAndPurposeAndNotRevoked(networkId, Purpose.Firewall); + List fwRules = _firewallDao.listByNetworkAndPurposeAndNotRevoked(network.getId(), Purpose.Firewall); if (logger.isDebugEnabled()) { - logger.debug("Releasing " + fwRules.size() + " firewall rules for network id=" + networkId); + logger.debug("Releasing {} firewall rules for network {}", fwRules.size(), network); } for (FirewallRuleVO rule : fwRules) { @@ -1040,14 +1037,14 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, } // now send everything to the backend - List rulesToApply = _firewallDao.listByNetworkAndPurpose(networkId, Purpose.Firewall); + List rulesToApply = _firewallDao.listByNetworkAndPurpose(network.getId(), Purpose.Firewall); boolean success = applyFirewallRules(rulesToApply, true, caller); // Now we check again in case more rules have been inserted. 
- rules.addAll(_firewallDao.listByNetworkAndPurposeAndNotRevoked(networkId, Purpose.Firewall)); + rules.addAll(_firewallDao.listByNetworkAndPurposeAndNotRevoked(network.getId(), Purpose.Firewall)); if (logger.isDebugEnabled()) { - logger.debug("Successfully released firewall rules for network id=" + networkId + " and # of rules now = " + rules.size()); + logger.debug("Successfully released firewall rules for network {} and # of rules now = {}", network, rules.size()); } return success && rules.size() == 0; @@ -1062,7 +1059,7 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, return true; } - logger.debug("Revoking Firewall rule id=" + fwRule.getId() + " as a part of rule delete id=" + ruleId + " with apply=" + apply); + logger.debug("Revoking Firewall rule [{}] as a part of rule delete id={} with apply={}", fwRule, ruleId, apply); return revokeIngressFirewallRule(fwRule.getId(), apply); } @@ -1098,10 +1095,10 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, Set ipsToReprogram = new HashSet(); if (firewallRules.isEmpty()) { - logger.debug("No firewall rules are found for vm id=" + vmId); + logger.debug("No firewall rules are found for vm: {}", vm); return true; } else { - logger.debug("Found " + firewallRules.size() + " to cleanup for vm id=" + vmId); + logger.debug("Found {} to cleanup for vm: {}", firewallRules.size(), vm); } for (FirewallRuleVO rule : firewallRules) { @@ -1112,11 +1109,12 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, // apply rules for all ip addresses for (Long ipId : ipsToReprogram) { - logger.debug("Applying firewall rules for ip address id=" + ipId + " as a part of vm expunge"); + IPAddressVO ip = _ipAddressDao.findById(ipId); + logger.debug("Applying firewall rules for ip address {} with id={} as a part of vm expunge", ip, ipId); try { success = success && applyIngressFirewallRules(ipId, _accountMgr.getSystemAccount()); } catch 
(ResourceUnavailableException ex) { - logger.warn("Failed to apply firewall rules for ip id=" + ipId); + logger.warn("Failed to apply firewall rules for ip {}", ip); success = false; } } diff --git a/server/src/main/java/com/cloud/network/guru/DirectNetworkGuru.java b/server/src/main/java/com/cloud/network/guru/DirectNetworkGuru.java index a8c98fc1dee..28948174fb0 100644 --- a/server/src/main/java/com/cloud/network/guru/DirectNetworkGuru.java +++ b/server/src/main/java/com/cloud/network/guru/DirectNetworkGuru.java @@ -359,7 +359,7 @@ public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { @DB public void deallocate(final Network network, final NicProfile nic, VirtualMachineProfile vm) { if (logger.isDebugEnabled()) { - logger.debug("Deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIPv4Address()); + logger.debug("Deallocate network: network: {}, nic: {}", network, nic); } if (nic.getIPv4Address() != null) { @@ -371,14 +371,14 @@ public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { // if the ip address a part of placeholder, don't release it Nic placeholderNic = _networkModel.getPlaceholderNicForRouter(network, null); if (placeholderNic != null && placeholderNic.getIPv4Address().equalsIgnoreCase(ip.getAddress().addr())) { - logger.debug("Not releasing direct ip " + ip.getId() + " yet as its ip is saved in the placeholder"); + logger.debug("Not releasing direct ip {} yet as its ip is saved in the placeholder", ip); } else { _ipAddrMgr.markIpAsUnavailable(ip.getId()); _ipAddressDao.unassignIpAddress(ip.getId()); } //unassign nic secondary ip address - logger.debug("remove nic " + nic.getId() + " secondary ip "); + logger.debug("remove nic {} secondary ip ", nic); List nicSecIps = null; nicSecIps = _nicSecondaryIpDao.getSecondaryIpAddressesForNic(nic.getId()); for (String secIp : nicSecIps) { diff --git a/server/src/main/java/com/cloud/network/guru/DirectPodBasedNetworkGuru.java 
b/server/src/main/java/com/cloud/network/guru/DirectPodBasedNetworkGuru.java index 2800e3284c1..2eada6ae427 100644 --- a/server/src/main/java/com/cloud/network/guru/DirectPodBasedNetworkGuru.java +++ b/server/src/main/java/com/cloud/network/guru/DirectPodBasedNetworkGuru.java @@ -226,16 +226,16 @@ public class DirectPodBasedNetworkGuru extends DirectNetworkGuru { */ if (vlan.getIp6Cidr() != null) { if (nic.getIPv6Address() == null) { - logger.debug("Found IPv6 CIDR " + vlan.getIp6Cidr() + " for VLAN " + vlan.getId()); + logger.debug("Found IPv6 CIDR {} for VLAN {}", vlan.getIp6Cidr(), vlan); nic.setIPv6Cidr(vlan.getIp6Cidr()); nic.setIPv6Gateway(vlan.getIp6Gateway()); IPv6Address ipv6addr = NetUtils.EUI64Address(vlan.getIp6Cidr(), nic.getMacAddress()); - logger.info("Calculated IPv6 address " + ipv6addr + " using EUI-64 for NIC " + nic.getUuid()); + logger.info("Calculated IPv6 address {} using EUI-64 for NIC {}", ipv6addr, nic); nic.setIPv6Address(ipv6addr.toString()); } } else { - logger.debug("No IPv6 CIDR configured for VLAN " + vlan.getId()); + logger.debug("No IPv6 CIDR configured for VLAN {}", vlan); } } }); diff --git a/server/src/main/java/com/cloud/network/guru/GuestNetworkGuru.java b/server/src/main/java/com/cloud/network/guru/GuestNetworkGuru.java index c46be9bf428..96c3da66c09 100644 --- a/server/src/main/java/com/cloud/network/guru/GuestNetworkGuru.java +++ b/server/src/main/java/com/cloud/network/guru/GuestNetworkGuru.java @@ -201,7 +201,7 @@ public abstract class GuestNetworkGuru extends AdapterBase implements NetworkGur } if (methods.isEmpty()) { // The empty isolation method is assumed to be VLAN - logger.debug("Empty physical isolation type for physical network " + physicalNetwork.getUuid()); + logger.debug("Empty physical isolation type for physical network {}", physicalNetwork); methods = new ArrayList(1); methods.add("VLAN".toLowerCase()); } @@ -297,7 +297,7 @@ public abstract class GuestNetworkGuru extends AdapterBase implements NetworkGur 
public void deallocate(final Network network, final NicProfile nic, final VirtualMachineProfile vm) { if (network.getSpecifyIpRanges()) { if (logger.isDebugEnabled()) { - logger.debug("Deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIPv4Address()); + logger.debug("Deallocate network: {}, nic: {}", network, nic); } final IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIPv4Address()); @@ -321,7 +321,7 @@ public abstract class GuestNetworkGuru extends AdapterBase implements NetworkGur } if (pNetwork.getVnet() == null) { - throw new CloudRuntimeException("Could not find vlan range for physical Network " + physicalNetworkId + "."); + throw new CloudRuntimeException(String.format("Could not find vlan range for physical Network %s.", pNetwork)); } Integer lowestVlanTag = null; final List> vnetList = pNetwork.getVnet(); @@ -437,7 +437,8 @@ public abstract class GuestNetworkGuru extends AdapterBase implements NetworkGur if (network.getGuestType() != GuestType.L2 && vm.getType() == VirtualMachine.Type.DomainRouter) { Nic placeholderNic = _networkModel.getPlaceholderNicForRouter(network, null); if (placeholderNic != null) { - logger.debug("Nic got an ip address " + placeholderNic.getIPv4Address() + " stored in placeholder nic for the network " + network); + logger.debug("Nic {} got an ip address {} stored in placeholder nic " + + "for the network {}", nic, placeholderNic.getIPv4Address(), network); guestIp = placeholderNic.getIPv4Address(); } } @@ -515,11 +516,11 @@ public abstract class GuestNetworkGuru extends AdapterBase implements NetworkGur } if ((profile.getBroadcastDomainType() == BroadcastDomainType.Vlan || profile.getBroadcastDomainType() == BroadcastDomainType.Vxlan) && !offering.isSpecifyVlan()) { - logger.debug("Releasing vnet for the network id=" + profile.getId()); + logger.debug("Releasing vnet for the network: {}", profile); 
_dcDao.releaseVnet(BroadcastDomainType.getValue(profile.getBroadcastUri()), profile.getDataCenterId(), profile.getPhysicalNetworkId(), profile.getAccountId(), profile.getReservationId()); ActionEventUtils.onCompletedActionEvent(CallContext.current().getCallingUserId(), profile.getAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_ZONE_VLAN_RELEASE, - "Released Zone Vnet: " + BroadcastDomainType.getValue(profile.getBroadcastUri()) + " for Network: " + profile.getId(), + String.format("Released Zone Vnet: %s for Network: %s", BroadcastDomainType.getValue(profile.getBroadcastUri()), profile), profile.getDataCenterId(), ApiCommandResourceType.Zone.toString(), 0); } diff --git a/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java b/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java index 320f064a31e..015cbe49049 100644 --- a/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java +++ b/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java @@ -376,14 +376,14 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements List conditionTOs = new ArrayList<>(lbConditions.size()); for (LbCondition lbCondition : lbConditions) { Counter counter = lbCondition.getCounter(); - LoadBalancerTO.CounterTO counterTO = new LoadBalancerTO.CounterTO(counter.getId(), counter.getName(), counter.getSource(), "" + counter.getValue(), counter.getProvider()); + LoadBalancerTO.CounterTO counterTO = new LoadBalancerTO.CounterTO(counter.getId(), counter.getUuid(), counter.getName(), counter.getSource(), "" + counter.getValue(), counter.getProvider()); Condition condition = lbCondition.getCondition(); - LoadBalancerTO.ConditionTO conditionTO = new LoadBalancerTO.ConditionTO(condition.getId(), condition.getThreshold(), condition.getRelationalOperator(), counterTO); + LoadBalancerTO.ConditionTO conditionTO = new LoadBalancerTO.ConditionTO(condition.getId(), condition.getUuid(), 
condition.getThreshold(), condition.getRelationalOperator(), counterTO); conditionTOs.add(conditionTO); } AutoScalePolicy autoScalePolicy = lbAutoScalePolicy.getPolicy(); - autoScalePolicyTOs.add(new LoadBalancerTO.AutoScalePolicyTO(autoScalePolicy.getId(), autoScalePolicy.getDuration(), autoScalePolicy.getQuietTime(), autoScalePolicy.getLastQuietTime(), - autoScalePolicy.getAction(), conditionTOs, lbAutoScalePolicy.isRevoked())); + autoScalePolicyTOs.add(new LoadBalancerTO.AutoScalePolicyTO(autoScalePolicy.getId(), autoScalePolicy.getUuid(), autoScalePolicy.getDuration(), autoScalePolicy.getQuietTime(), + autoScalePolicy.getLastQuietTime(), autoScalePolicy.getAction(), conditionTOs, lbAutoScalePolicy.isRevoked())); } LbAutoScaleVmProfile lbAutoScaleVmProfile = lbAutoScaleVmGroup.getProfile(); AutoScaleVmProfile autoScaleVmProfile = lbAutoScaleVmProfile.getProfile(); @@ -415,7 +415,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements Network network = _networkDao.findById(loadBalancer.getNetworkId()); List providers = _networkMgr.getProvidersForServiceInNetwork(network, Network.Service.Lb); if (CollectionUtils.isEmpty(providers)) { - throw new CloudRuntimeException(String.format("Unable to find LB provider for network with id: %s ", network.getId())); + throw new CloudRuntimeException(String.format("Unable to find LB provider for network: %s ", network)); } return providers.get(0); } @@ -477,16 +477,16 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements try { success = applyAutoScaleConfig(loadBalancer, vmGroup, currentState); } catch (ResourceUnavailableException e) { - logger.warn("Unable to configure AutoScaleVmGroup to the lb rule: " + loadBalancer.getId() + " because resource is unavailable:", e); + logger.warn("Unable to configure AutoScaleVmGroup to the lb rule: {} because resource is unavailable:", loadBalancer, e); if (isRollBackAllowedForProvider(loadBalancer)) { loadBalancer.setState(backupState); 
_lbDao.persist(loadBalancer); - logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " lb state rolback while creating AutoscaleVmGroup"); + logger.debug("LB Rollback rule: {} lb state rollback while creating AutoscaleVmGroup", loadBalancer); } throw e; } finally { if (!success) { - logger.warn("Failed to configure LB Auto Scale Vm Group with Id:" + vmGroupid); + logger.warn(String.format("Failed to configure LB Auto Scale Vm Group: %s", vmGroup)); } } @@ -496,15 +496,15 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements @Override public void doInTransactionWithoutResult(TransactionStatus status) { loadBalancer.setState(FirewallRule.State.Active); - logger.debug("LB rule " + loadBalancer.getId() + " state is set to Active"); + logger.debug("LB rule {} state is set to Active", loadBalancer); _lbDao.persist(loadBalancer); vmGroup.setState(AutoScaleVmGroup.State.ENABLED); _autoScaleVmGroupDao.persist(vmGroup); - logger.debug("LB Auto Scale Vm Group with Id: " + vmGroupid + " is set to Enabled state."); + logger.debug("LB Auto Scale Vm Group: {} is set to Enabled state.", vmGroup); } }); } - logger.info("Successfully configured LB Autoscale Vm Group with Id: " + vmGroupid); + logger.info("Successfully configured LB Autoscale Vm Group: {}", vmGroup); } return success; } @@ -603,12 +603,12 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements _accountMgr.checkAccess(caller.getCallingAccount(), null, true, loadBalancer); if (loadBalancer.getState() == FirewallRule.State.Revoke) { - throw new InvalidParameterValueException("Failed: LB rule id: " + cmd.getLbRuleId() + " is in deleting state: "); + throw new InvalidParameterValueException(String.format("Failed: LB rule: %s is in deleting state: ", loadBalancer)); } /* Generic validations */ if (!genericValidator(cmd)) { - throw new InvalidParameterValueException("Failed to create Stickiness policy: Validation Failed " + cmd.getLbRuleId()); + throw new 
InvalidParameterValueException(String.format("Failed to create Stickiness policy: Validation Failed %s", loadBalancer)); } /* @@ -623,7 +623,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements LoadBalancingRule lbRule = new LoadBalancingRule(loadBalancer, getExistingDestinations(lbpolicy.getId()), policyList, null, sourceIp, null, loadBalancer.getLbProtocol()); if (!validateLbRule(lbRule)) { - throw new InvalidParameterValueException("Failed to create Stickiness policy: Validation Failed " + cmd.getLbRuleId()); + throw new InvalidParameterValueException(String.format("Failed to create Stickiness policy: Validation Failed %s", loadBalancer)); } /* Finally Insert into DB */ @@ -663,7 +663,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements _accountMgr.checkAccess(caller.getCallingAccount(), null, true, loadBalancer); if (loadBalancer.getState() == FirewallRule.State.Revoke) { - throw new InvalidParameterValueException("Failed: LB rule id: " + cmd.getLbRuleId() + " is in deleting state: "); + throw new InvalidParameterValueException(String.format("Failed: LB rule: %s is in deleting state: ", loadBalancer)); } /* @@ -672,13 +672,13 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements */ if (!validateHealthCheck(cmd)) { throw new InvalidParameterValueException( - "Failed to create HealthCheck policy: Validation Failed (HealthCheck Policy is not supported by LB Provider for the LB rule id :" + cmd.getLbRuleId() + ")"); + String.format("Failed to create HealthCheck policy: Validation Failed (HealthCheck Policy is not supported by LB Provider for the LB rule:%s)", loadBalancer)); } /* Validation : check for the multiple hc policies to the rule id */ List hcPolicies = _lb2healthcheckDao.listByLoadBalancerId(cmd.getLbRuleId(), false); if (hcPolicies.size() > 0) { - throw new InvalidParameterValueException("Failed to create HealthCheck policy: Already policy attached for the LB Rule id :" + 
cmd.getLbRuleId()); + throw new InvalidParameterValueException(String.format("Failed to create HealthCheck policy: Already policy attached for the LB Rule:%s", loadBalancer)); } /* * Specific validations using network element validator for specific @@ -752,12 +752,12 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements try { applyLoadBalancerConfig(cmd.getLbRuleId()); } catch (ResourceUnavailableException e) { - logger.warn("Unable to apply Stickiness policy to the lb rule: " + cmd.getLbRuleId() + " because resource is unavailable:", e); + logger.warn("Unable to apply Stickiness policy to the lb rule: {} because resource is unavailable:", loadBalancer, e); if (isRollBackAllowedForProvider(loadBalancer)) { loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); deleteLBStickinessPolicy(cmd.getEntityId(), false); - logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " lb state rolback while creating sticky policy"); + logger.debug("LB Rollback rule: {} lb state rollback while creating sticky policy", loadBalancer); } else { deleteLBStickinessPolicy(cmd.getEntityId(), false); if (oldStickinessPolicyId != 0) { @@ -798,11 +798,11 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements try { applyLoadBalancerConfig(cmd.getLbRuleId()); } catch (ResourceUnavailableException e) { - logger.warn("Unable to apply healthcheck policy to the lb rule: " + cmd.getLbRuleId() + " because resource is unavailable:", e); + logger.warn("Unable to apply healthcheck policy to the lb rule: {} because resource is unavailable:", loadBalancer, e); if (isRollBackAllowedForProvider(loadBalancer)) { loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " lb state rolback while creating healthcheck policy"); + logger.debug("LB Rollback rule: {} lb state rollback while creating healthcheck policy", loadBalancer); } deleteLBHealthCheckPolicy(cmd.getEntityId(), 
false); success = false; @@ -823,7 +823,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } LoadBalancerVO loadBalancer = _lbDao.findById(Long.valueOf(stickinessPolicy.getLoadBalancerId())); if (loadBalancer == null) { - throw new InvalidParameterValueException("Invalid Load balancer : " + stickinessPolicy.getLoadBalancerId() + " for Stickiness policy id: " + stickinessPolicyId); + throw new InvalidParameterValueException(String.format("Invalid Load balancer: %d for Stickiness policy: %s", + stickinessPolicy.getLoadBalancerId(), stickinessPolicy)); } long loadBalancerId = loadBalancer.getId(); FirewallRule.State backupState = loadBalancer.getState(); @@ -838,12 +839,13 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements boolean backupStickyState = stickinessPolicy.isRevoke(); stickinessPolicy.setRevoke(true); _lb2stickinesspoliciesDao.persist(stickinessPolicy); - logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", stickinesspolicyID " + stickinessPolicyId); + logger.debug("Set load balancer rule for revoke: rule {}, stickinesspolicy {}", loadBalancer, stickinessPolicy); try { if (!applyLoadBalancerConfig(loadBalancerId)) { - logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for stickinesspolicyID " + stickinessPolicyId); - throw new CloudRuntimeException("Failed to remove load balancer rule id " + loadBalancerId + " for stickinesspolicyID " + stickinessPolicyId); + String error = String.format("Failed to remove load balancer rule %s for stickinesspolicy %s", loadBalancer, stickinessPolicy); + logger.warn(error); + throw new CloudRuntimeException(error); } } catch (ResourceUnavailableException e) { if (isRollBackAllowedForProvider(loadBalancer)) { @@ -851,7 +853,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements _lb2stickinesspoliciesDao.persist(stickinessPolicy); loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - 
logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while deleting sticky policy: " + stickinessPolicyId); + logger.debug("LB Rollback rule: {} while deleting sticky policy: {}", loadBalancer, stickinessPolicy); } logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); success = false; @@ -876,7 +878,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } LoadBalancerVO loadBalancer = _lbDao.findById(Long.valueOf(healthCheckPolicy.getLoadBalancerId())); if (loadBalancer == null) { - throw new InvalidParameterValueException("Invalid Load balancer : " + healthCheckPolicy.getLoadBalancerId() + " for HealthCheck policy id: " + healthCheckPolicyId); + throw new InvalidParameterValueException(String.format("Invalid Load balancer: %d for HealthCheck policy: %s", healthCheckPolicy.getLoadBalancerId(), healthCheckPolicy)); } final long loadBalancerId = loadBalancer.getId(); FirewallRule.State backupState = loadBalancer.getState(); @@ -891,7 +893,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements boolean backupStickyState = healthCheckPolicy.isRevoke(); healthCheckPolicy.setRevoke(true); _lb2healthcheckDao.persist(healthCheckPolicy); - logger.debug("Set health check policy to revoke for loadbalancing rule id : " + loadBalancerId + ", healthCheckpolicyID " + healthCheckPolicyId); + logger.debug("Set health check policy to revoke for loadbalancing rule : {}, healthCheckpolicy {}", loadBalancer, healthCheckPolicy); // removing the state of services set by the monitor. 
final List maps = _lb2VmMapDao.listByLoadBalancerId(loadBalancerId); @@ -899,7 +901,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - logger.debug("Resetting health state policy for services in loadbalancing rule id : " + loadBalancerId); + logger.debug("Resetting health state policy for services in loadbalancing rule: {}", loadBalancer); for (LoadBalancerVMMapVO map : maps) { map.setState(null); _lb2VmMapDao.persist(map); @@ -910,8 +912,9 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements try { if (!applyLoadBalancerConfig(loadBalancerId)) { - logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for healthCheckpolicyID " + healthCheckPolicyId); - throw new CloudRuntimeException("Failed to remove load balancer rule id " + loadBalancerId + " for healthCheckpolicyID " + healthCheckPolicyId); + String error = String.format("Failed to remove load balancer rule %s for healthCheckpolicy %s", loadBalancer, healthCheckPolicy); + logger.warn(error); + throw new CloudRuntimeException(error); } } catch (ResourceUnavailableException e) { if (isRollBackAllowedForProvider(loadBalancer)) { @@ -919,7 +922,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements _lb2healthcheckDao.persist(healthCheckPolicy); loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while deleting healthcheck policy: " + healthCheckPolicyId); + logger.debug("LB Rollback rule: {} while deleting healthcheck policy: {}", loadBalancer, healthCheckPolicy); } logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); success = false; @@ -1023,7 +1026,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } if (!isAutoScaleVM && 
_autoScaleVmGroupDao.isAutoScaleLoadBalancer(loadBalancerId)) { - throw new InvalidParameterValueException("Failed to assign to load balancer " + loadBalancerId + " because it is being used by an Autoscale VM group."); + throw new InvalidParameterValueException(String.format("Failed to assign to load balancer %s because it is being used by an Autoscale VM group.", loadBalancer)); } if (instanceIds == null && vmIdIpMap.isEmpty()) { @@ -1212,7 +1215,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements }); if (!vmInstanceIds.isEmpty()) { _lb2VmMapDao.remove(loadBalancer.getId(), vmInstanceIds, null); - logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while attaching VM: " + vmInstanceIds); + logger.debug("LB Rollback rule: {} while attaching VM: {}", loadBalancer, vmInstanceIds); } loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); @@ -1314,7 +1317,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements _lbDao.persist(loadBalancer); LoadBalancerCertMapVO certMap = _lbCertMapDao.findByLbRuleId(lbRuleId); _lbCertMapDao.remove(certMap.getId()); - logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while adding cert"); + logger.debug("LB Rollback rule: {} while adding cert", loadBalancer); } logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); } @@ -1350,8 +1353,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements _lbCertMapDao.persist(lbCertMap); if (!applyLoadBalancerConfig(lbRuleId)) { - logger.warn("Failed to remove cert from load balancer rule id " + lbRuleId); - CloudRuntimeException ex = new CloudRuntimeException("Failed to remove certificate load balancer rule id " + lbRuleId); + logger.warn("Failed to remove cert from load balancer rule {}", loadBalancer); + CloudRuntimeException ex = new CloudRuntimeException(String.format("Failed to remove certificate load balancer rule %s", loadBalancer)); 
ex.addProxyObject(loadBalancer.getUuid(), "loadBalancerId"); throw ex; } @@ -1362,11 +1365,11 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements _lbCertMapDao.persist(lbCertMap); loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - logger.debug("Rolled back certificate removal lb id " + lbRuleId); + logger.debug(String.format("Rolled back certificate removal lb %s", loadBalancer)); } logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); if (!success) { - CloudRuntimeException ex = new CloudRuntimeException("Failed to remove certificate from load balancer rule id " + lbRuleId); + CloudRuntimeException ex = new CloudRuntimeException(String.format("Failed to remove certificate from load balancer rule %s", loadBalancer)); ex.addProxyObject(loadBalancer.getUuid(), "loadBalancerId"); throw ex; } @@ -1435,25 +1438,23 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements lbvm.setRevoke(true); _lb2VmMapDao.persist(lbvm); } - logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + instanceId); + logger.debug("Set load balancer rule for revoke: rule {}, vm {}", loadBalancer::toString, () -> _vmDao.findById(instanceId)); } else { for (String vmIp: lbVmIps) { LoadBalancerVMMapVO map = _lb2VmMapDao.findByLoadBalancerIdAndVmIdVmIp (loadBalancerId, instanceId, vmIp); if (map == null) { - throw new InvalidParameterValueException("The instance id: "+ instanceId +" is not configured " - + " for LB rule id " + loadBalancerId); + throw new InvalidParameterValueException(String.format("The instance id: %d is not configured for LB rule %s", instanceId, loadBalancer)); } map.setRevoke(true); _lb2VmMapDao.persist(map); - logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + - instanceId + ", vmip " + vmIp); + logger.debug("Set load balancer rule for revoke: rule {}, vmId {}, vmip {}", loadBalancer::toString, () -> 
_vmDao.findById(instanceId), vmIp::toString); } } } if (!applyLoadBalancerConfig(loadBalancerId)) { - logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for vms " + instanceIds); + logger.warn("Failed to remove load balancer rule {} for vms {}", loadBalancer, instanceIds); CloudRuntimeException ex = new CloudRuntimeException("Failed to remove specified load balancer rule id for vms " + instanceIds); ex.addProxyObject(loadBalancer.getUuid(), "loadBalancerId"); throw ex; @@ -1478,21 +1479,20 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements LoadBalancerVMMapVO map = _lb2VmMapDao.findByLoadBalancerIdAndVmId(loadBalancerId, instanceId); map.setRevoke(false); _lb2VmMapDao.persist(map); - logger.debug("LB Rollback rule id: " + loadBalancerId + ",while removing vmId " + instanceId); + logger.debug("LB Rollback rule: {},while removing vmId {}", loadBalancer, instanceId); }else { for (String vmIp: lbVmIps) { LoadBalancerVMMapVO map = _lb2VmMapDao.findByLoadBalancerIdAndVmIdVmIp (loadBalancerId, instanceId, vmIp); map.setRevoke(true); _lb2VmMapDao.persist(map); - logger.debug("LB Rollback rule id: " + loadBalancerId + ",while removing vmId " + - instanceId + ", vmip " + vmIp); + logger.debug("LB Rollback rule: {},while removing vmId {}, vmip {}", loadBalancer, instanceId, vmIp); } } } loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - logger.debug("LB Rollback rule id: " + loadBalancerId + " while removing vm instances"); + logger.debug("LB Rollback rule: {} while removing vm instances", loadBalancer); } logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); } @@ -1526,7 +1526,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements map.setRevoke(true); _lb2VmMapDao.persist(map); - logger.debug("Set load balancer rule for revoke: rule id " + map.getLoadBalancerId() + ", vmId " + instanceId); + logger.debug("Set load balancer rule for revoke: rule 
{}, vm {}", () -> _lbDao.findById(map.getLoadBalancerId()), () -> _vmDao.findById(instanceId)); } // Reapply all lbs that had the vm assigned @@ -1555,7 +1555,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements boolean result = deleteLoadBalancerRule(loadBalancerId, apply, caller, ctx.getCallingUserId(), true); if (!result) { - throw new CloudRuntimeException("Unable to remove load balancer rule " + loadBalancerId); + throw new CloudRuntimeException(String.format("Unable to remove load balancer rule %s", rule)); } return result; } @@ -1575,7 +1575,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (lbCertMap != null) { boolean removeResult = removeCertFromLoadBalancer(loadBalancerId); if (!removeResult) { - throw new CloudRuntimeException("Unable to remove certificate from load balancer rule " + loadBalancerId); + throw new CloudRuntimeException(String.format("Unable to remove certificate from load balancer rule %s", lb)); } } @@ -1600,7 +1600,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements for (LoadBalancerVMMapVO map : maps) { map.setRevoke(true); _lb2VmMapDao.persist(map); - logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + map.getInstanceId()); + logger.debug("Set load balancer rule for revoke: rule {}, vmId {}", lb::toString, () -> _vmDao.findById(map.getInstanceId())); } } @@ -1640,12 +1640,12 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (backupMaps != null) { for (LoadBalancerVMMapVO map : backupMaps) { _lb2VmMapDao.persist(map); - logger.debug("LB Rollback rule id: " + loadBalancerId + ", vmId " + map.getInstanceId()); + logger.debug("LB Rollback rule: {}, vmId {}", lb, map.getInstanceId()); } } lb.setState(backupState); _lbDao.persist(lb); - logger.debug("LB Rollback rule id: " + loadBalancerId + " while deleting LB rule."); + logger.debug("LB Rollback rule: {} while deleting LB rule.", lb); } 
else { logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); } @@ -1655,8 +1655,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements FirewallRuleVO relatedRule = _firewallDao.findByRelatedId(lb.getId()); if (relatedRule != null) { - logger.warn("Unable to remove firewall rule id=" + lb.getId() + " as it has related firewall rule id=" + relatedRule.getId() + - "; leaving it in Revoke state"); + logger.warn("Unable to remove firewall rule={} as it has related firewall rule={}; leaving it in Revoke state", lb, relatedRule); return false; } else { _firewallMgr.removeRule(lb); @@ -1667,7 +1666,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements // Bug CS-15411 opened to document this // _elbMgr.handleDeleteLoadBalancerRule(lb, callerUserId, caller); - logger.debug("Load balancer with id " + lb.getId() + " is removed successfully"); + logger.debug("Load balancer {} is removed successfully", lb); return true; } @@ -1740,7 +1739,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements // set networkId just for verification purposes _networkModel.checkIpForService(ipVO, Service.Lb, networkId); - logger.debug("The ip is not associated with the VPC network id=" + networkId + " so assigning"); + logger.debug("The ip is not associated with the VPC network={} so assigning", network); ipVO = _ipAddrMgr.associateIPToGuestNetwork(ipAddrId, networkId, false); performedIpAssoc = true; } @@ -1772,7 +1771,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements // release ip address if ipassoc was perfored if (performedIpAssoc) { ipVO = _ipAddressDao.findById(ipVO.getId()); - _vpcMgr.unassignIPFromVpcNetwork(ipVO.getId(), networkId); + _vpcMgr.unassignIPFromVpcNetwork(ipVO, network); } } @@ -1887,8 +1886,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (!_firewallDao.setStateToAdd(newRule)) { throw new 
CloudRuntimeException("Unable to update the state to add for " + newRule); } - logger.debug("Load balancer " + newRule.getId() + " for Ip address id=" + sourceIpId + ", public port " + srcPort + ", private port " + destPort + - " is added successfully."); + logger.debug("Load balancer {} for Ip address: {}, public port {}, private port {} is added successfully.", newRule, ipAddr, srcPort, destPort); CallContext.current().setEventDetails("Load balancer Id: " + newRule.getId()); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_LOAD_BALANCER_CREATE, ipAddr.getAllocatedToAccountId(), ipAddr.getDataCenterId(), newRule.getId(), null, LoadBalancingRule.class.getName(), newRule.getUuid()); @@ -1944,10 +1942,10 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } @Override - public boolean revokeLoadBalancersForNetwork(long networkId, Scheme scheme) throws ResourceUnavailableException { - List lbs = _lbDao.listByNetworkIdAndScheme(networkId, scheme); + public boolean revokeLoadBalancersForNetwork(Network network, Scheme scheme) throws ResourceUnavailableException { + List lbs = _lbDao.listByNetworkIdAndScheme(network.getId(), scheme); if (logger.isDebugEnabled()) { - logger.debug("Revoking " + lbs.size() + " " + scheme + " load balancing rules for network id=" + networkId); + logger.debug("Revoking {} {} load balancing rules for network {}", lbs.size(), scheme, network); } if (lbs != null) { for (LoadBalancerVO lb : lbs) { // called during restart, not persisting state in db @@ -1955,19 +1953,19 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } return applyLoadBalancerRules(lbs, false); // called during restart, not persisting state in db } else { - logger.info("Network id=" + networkId + " doesn't have load balancer rules, nothing to revoke"); + logger.info("Network {} doesn't have load balancer rules, nothing to revoke", network); return true; } } @Override - public boolean applyLoadBalancersForNetwork(long 
networkId, Scheme scheme) throws ResourceUnavailableException { - List lbs = _lbDao.listByNetworkIdAndScheme(networkId, scheme); + public boolean applyLoadBalancersForNetwork(Network network, Scheme scheme) throws ResourceUnavailableException { + List lbs = _lbDao.listByNetworkIdAndScheme(network.getId(), scheme); if (lbs != null) { - logger.debug("Applying load balancer rules of scheme " + scheme + " in network id=" + networkId); + logger.debug("Applying load balancer rules of scheme {} in network {}", scheme, network); return applyLoadBalancerRules(lbs, true); } else { - logger.info("Network id=" + networkId + " doesn't have load balancer rules of scheme " + scheme + ", nothing to apply"); + logger.info("Network {} doesn't have load balancer rules of scheme {}, nothing to apply", network, scheme); return true; } } @@ -2030,11 +2028,11 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (lb.getState() == FirewallRule.State.Revoke) { removeLBRule(lb); - logger.debug("LB " + lb.getId() + " is successfully removed"); + logger.debug("LB {} is successfully removed", lb); checkForReleaseElasticIp = true; } else if (lb.getState() == FirewallRule.State.Add) { lb.setState(FirewallRule.State.Active); - logger.debug("LB rule " + lb.getId() + " state is set to Active"); + logger.debug("LB rule {} state is set to Active", lb); _lbDao.persist(lb); } @@ -2045,22 +2043,22 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements for (LoadBalancerVMMapVO lbVmMap : lbVmMaps) { instanceIds.add(lbVmMap.getInstanceId()); _lb2VmMapDao.remove(lb.getId(), lbVmMap.getInstanceId(), lbVmMap.getInstanceIp(), null); - logger.debug("Load balancer rule id " + lb.getId() + " is removed for vm " + - lbVmMap.getInstanceId() + " instance ip " + lbVmMap.getInstanceIp()); + logger.debug("Load balancer rule {} is removed for vm {} instance ip {}", + lb, lbVmMap.getInstanceId(), lbVmMap.getInstanceIp()); } if 
(_lb2VmMapDao.listByLoadBalancerId(lb.getId()).isEmpty()) { lb.setState(FirewallRule.State.Add); _lbDao.persist(lb); - logger.debug("LB rule " + lb.getId() + " state is set to Add as there are no more active LB-VM mappings"); + logger.debug("LB rule {} state is set to Add as there are no more active LB-VM mappings", lb); } // remove LB-Stickiness policy mapping that were state to revoke List stickinesspolicies = _lb2stickinesspoliciesDao.listByLoadBalancerId(lb.getId(), true); if (!stickinesspolicies.isEmpty()) { _lb2stickinesspoliciesDao.remove(lb.getId(), true); - logger.debug("Load balancer rule id " + lb.getId() + " is removed stickiness policies"); + logger.debug("Load balancer rule {} is removed stickiness policies", lb); } // remove LB-HealthCheck policy mapping that were state to @@ -2068,13 +2066,13 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements List healthCheckpolicies = _lb2healthcheckDao.listByLoadBalancerId(lb.getId(), true); if (!healthCheckpolicies.isEmpty()) { _lb2healthcheckDao.remove(lb.getId(), true); - logger.debug("Load balancer rule id " + lb.getId() + " is removed health check monitors policies"); + logger.debug("Load balancer rule {} is removed health check monitors policies", lb); } LoadBalancerCertMapVO lbCertMap = _lbCertMapDao.findByLbRuleId(lb.getId()); if (lbCertMap != null && lbCertMap.isRevoke()) { _lbCertMapDao.remove(lbCertMap.getId()); - logger.debug("Load balancer rule id " + lb.getId() + " removed certificate mapping"); + logger.debug("Load balancer rule {} removed certificate mapping", lb); } return checkForReleaseElasticIp; @@ -2100,8 +2098,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements // if the rule is the last one for the ip address assigned to // VPC, unassign it from the network if (lb.getSourceIpAddressId() != null) { - IpAddress ip = _ipAddressDao.findById(lb.getSourceIpAddressId()); - _vpcMgr.unassignIPFromVpcNetwork(ip.getId(), lb.getNetworkId()); + 
_vpcMgr.unassignIPFromVpcNetwork(lb.getSourceIpAddressId(), lb.getNetworkId()); } } } @@ -2113,12 +2110,12 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements IpAddress ip = _ipAddressDao.findById(lb.getSourceIpAddressId()); boolean success = true; if (ip.getSystem()) { - logger.debug("Releasing system ip address " + lb.getSourceIpAddressId() + " as a part of delete lb rule"); - if (!_ipAddrMgr.disassociatePublicIpAddress(lb.getSourceIpAddressId(), CallContext.current().getCallingUserId(), CallContext.current().getCallingAccount())) { - logger.warn("Unable to release system ip address id=" + lb.getSourceIpAddressId() + " as a part of delete lb rule"); + logger.debug("Releasing system ip address {} as a part of delete lb rule", ip); + if (!_ipAddrMgr.disassociatePublicIpAddress(ip, CallContext.current().getCallingUserId(), CallContext.current().getCallingAccount())) { + logger.warn("Unable to release system ip address={} as a part of delete lb rule", ip); success = false; } else { - logger.warn("Successfully released system ip address id=" + lb.getSourceIpAddressId() + " as a part of delete lb rule"); + logger.warn("Successfully released system ip address={} as a part of delete lb rule", ip); } } return success; @@ -2135,7 +2132,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements for (FirewallRule rule : rules) { boolean result = deleteLoadBalancerRule(rule.getId(), true, caller, callerUserId, false); if (result == false) { - logger.warn("Unable to remove load balancer rule " + rule.getId()); + logger.warn("Unable to remove load balancer rule {}", rule); return false; } } @@ -2151,7 +2148,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements for (FirewallRule rule : rules) { boolean result = deleteLoadBalancerRule(rule.getId(), true, caller, callerUserId, false); if (result == false) { - logger.warn("Unable to remove load balancer rule " + rule.getId()); + logger.warn("Unable to remove load 
balancer rule {}", rule); return false; } } @@ -2251,7 +2248,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements // Validate rule in LB provider LoadBalancingRule rule = getLoadBalancerRuleToApply(lb); if (!validateLbRule(rule)) { - throw new InvalidParameterValueException("Modifications in lb rule " + lbRuleId + " are not supported."); + throw new InvalidParameterValueException(String.format("Modifications in lb rule %s are not supported.", lb)); } LoadBalancerVO tmplbVo = _lbDao.findById(lbRuleId); @@ -2283,7 +2280,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements _lbDao.update(lb.getId(), lb); _lbDao.persist(lb); - logger.debug("LB Rollback rule id: " + lbRuleId + " while updating LB rule."); + logger.debug("LB Rollback rule: {} while updating LB rule.", lb); } logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); success = false; @@ -2291,7 +2288,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } if (!success) { - throw new CloudRuntimeException("Failed to update load balancer rule: " + lbRuleId); + throw new CloudRuntimeException(String.format("Failed to update load balancer rule: %s", lb)); } return lb; @@ -2691,7 +2688,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements LoadBalancerVO loadBalancer = _lbDao.findById(Long.valueOf(policy.getLoadBalancerId())); if (loadBalancer == null) { - throw new InvalidParameterValueException("Invalid Load balancer : " + policy.getLoadBalancerId() + " for Stickiness policy id: " + id); + throw new InvalidParameterValueException(String.format("Invalid Load balancer: %d for Stickiness policy: %s", policy.getLoadBalancerId(), policy)); } _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, loadBalancer); @@ -2718,7 +2715,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements LoadBalancerVO loadBalancer = 
_lbDao.findById(Long.valueOf(policy.getLoadBalancerId())); if (loadBalancer == null) { - throw new InvalidParameterValueException("Invalid Load balancer : " + policy.getLoadBalancerId() + " for Stickiness policy id: " + id); + throw new InvalidParameterValueException(String.format("Invalid Load balancer: %d for Stickiness policy: %s", policy.getLoadBalancerId(), policy)); } _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, loadBalancer); diff --git a/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java b/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java index 0c21e4559ed..18ce55aa328 100644 --- a/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java +++ b/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java @@ -1368,7 +1368,7 @@ public class CommandSetupHelper { private void setIpAddressNetworkParams(IpAddressTO ipAddress, final Network network, final VirtualRouter router) { if (_networkModel.isPrivateGateway(network.getId())) { - logger.debug("network " + network.getId() + " (name: " + network.getName() + " ) is a vpc private gateway, set traffic type to Public"); + logger.debug("network (id: {}, uuid: {}, name: {}) is a vpc private gateway, set traffic type to Public", network.getId(), network.getUuid(), network.getName()); ipAddress.setTrafficType(TrafficType.Public); ipAddress.setPrivateGateway(true); } else { diff --git a/server/src/main/java/com/cloud/network/router/NetworkHelperImpl.java b/server/src/main/java/com/cloud/network/router/NetworkHelperImpl.java index 1f4642bbd85..f33a6c2f632 100644 --- a/server/src/main/java/com/cloud/network/router/NetworkHelperImpl.java +++ b/server/src/main/java/com/cloud/network/router/NetworkHelperImpl.java @@ -192,8 +192,8 @@ public class NetworkHelperImpl implements NetworkHelper { @Override public boolean sendCommandsToRouter(final VirtualRouter router, final Commands cmds) throws AgentUnavailableException, 
ResourceUnavailableException { if (!checkRouterVersion(router)) { - logger.debug("Router requires upgrade. Unable to send command to router:" + router.getId() + ", router template version : " + router.getTemplateVersion() - + ", minimal required version : " + NetworkOrchestrationService.MinVRVersion.valueIn(router.getDataCenterId())); + logger.debug("Router requires upgrade. Unable to send command to router: {}, router template version: {}, minimal required version: {}", + router, router.getTemplateVersion(), NetworkOrchestrationService.MinVRVersion.valueIn(router.getDataCenterId())); throw new ResourceUnavailableException("Unable to send command. Router requires upgrade", VirtualRouter.class, router.getId()); } Answer[] answers = null; @@ -242,10 +242,10 @@ public class NetworkHelperImpl implements NetworkHelper { DomainRouterVO disconnectedRouter = (DomainRouterVO) disconnectedRouters.get(0); if (logger.isDebugEnabled()) { - logger.debug("About to stop the router " + disconnectedRouter.getInstanceName() + " due to: " + reason); + logger.debug("About to stop the router {} due to: {}", disconnectedRouter, reason); } final String title = "Virtual router " + disconnectedRouter.getInstanceName() + " would be stopped after connecting back, due to " + reason; - final String context = "Virtual router (name: " + disconnectedRouter.getInstanceName() + ", id: " + disconnectedRouter.getId() + final String context = "Virtual router (name: " + disconnectedRouter.getInstanceName() + ", id: " + disconnectedRouter.getUuid() + ") would be stopped after connecting back, due to: " + reason; _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER, disconnectedRouter.getDataCenterId(), disconnectedRouter.getPodIdToDeployIn(), title, context); disconnectedRouter.setStopPending(true); @@ -262,11 +262,10 @@ public class NetworkHelperImpl implements NetworkHelper { @Override public VirtualRouter destroyRouter(final long routerId, final Account caller, final Long callerUserId) 
throws ResourceUnavailableException, ConcurrentOperationException { - if (logger.isDebugEnabled()) { - logger.debug("Attempting to destroy router " + routerId); - } - final DomainRouterVO router = _routerDao.findById(routerId); + if (logger.isDebugEnabled()) { + logger.debug("Attempting to destroy router {} with id {}", router, routerId); + } if (router == null) { return null; } @@ -326,7 +325,7 @@ public class NetworkHelperImpl implements NetworkHelper { throw new ResourceUnavailableException("Starting router " + router + " failed! " + e.toString(), DataCenter.class, router.getDataCenterId()); } if (router.isStopPending()) { - logger.info("Clear the stop pending flag of router " + router.getHostName() + " after start router successfully!"); + logger.info("Clear the stop pending flag of router {} after start router successfully!", router); router.setStopPending(false); router = _routerDao.persist(router); } @@ -344,7 +343,7 @@ public class NetworkHelperImpl implements NetworkHelper { DomainRouterVO vm = _routerDao.findById(router.getId()); if (logger.isDebugEnabled()) { - logger.debug("Router " + router.getInstanceName() + " is not fully up yet, we will wait"); + logger.debug("Router {} is not fully up yet, we will wait", router); } while (vm.getState() == State.Starting) { try { @@ -358,13 +357,13 @@ public class NetworkHelperImpl implements NetworkHelper { if (vm.getState() == State.Running) { if (logger.isDebugEnabled()) { - logger.debug("Router " + router.getInstanceName() + " is now fully up"); + logger.debug("Router {} is now fully up", router); } return router; } - logger.warn("Router " + router.getInstanceName() + " failed to start. current state: " + vm.getState()); + logger.warn("Router {} failed to start. 
current state: {}", router, vm.getState()); return null; } @@ -404,7 +403,7 @@ public class NetworkHelperImpl implements NetworkHelper { } if (router.getState() == State.Running) { - logger.debug("Redundant router " + router.getInstanceName() + " is already running!"); + logger.debug("Redundant router {} is already running!", router); return router; } @@ -433,9 +432,7 @@ public class NetworkHelperImpl implements NetworkHelper { for (final DomainRouterVO rrouter : routerList) { if (rrouter.getHostId() != null && rrouter.getIsRedundantRouter() && rrouter.getState() == State.Running) { if (routerToBeAvoid != null) { - throw new ResourceUnavailableException("Try to start router " + router.getInstanceName() + "(" + router.getId() + ")" - + ", but there are already two redundant routers with IP " + router.getPublicIpAddress() + ", they are " + rrouter.getInstanceName() + "(" - + rrouter.getId() + ") and " + routerToBeAvoid.getInstanceName() + "(" + routerToBeAvoid.getId() + ")", DataCenter.class, + throw new ResourceUnavailableException(String.format("Try to start router %s(%s), but there are already two redundant routers with IP %s, they are %s(%s) and %s(%s)", router.getInstanceName(), router, router.getPublicIpAddress(), rrouter.getInstanceName(), rrouter, routerToBeAvoid.getInstanceName(), routerToBeAvoid), DataCenter.class, rrouter.getDataCenterId()); } routerToBeAvoid = rrouter; @@ -464,7 +461,7 @@ public class NetworkHelperImpl implements NetworkHelper { for (int i = 0; i < retryIndex; i++) { if (logger.isTraceEnabled()) { - logger.trace("Try to deploy redundant virtual router:" + router.getHostName() + ", for " + i + " time"); + logger.trace("Try to deploy redundant virtual router: {}, for {} time", router, i); } plan.setAvoids(avoids[i]); try { @@ -664,7 +661,7 @@ public class NetworkHelperImpl implements NetworkHelper { for (final HostVO h : hosts) { if (h.getState() == Status.Up) { - logger.debug("Pick up host that has hypervisor type " + h.getHypervisorType() 
+ " in cluster " + cv.getId() + " to start domain router for OVM"); + logger.debug("Pick up host that has hypervisor type {} in cluster {} to start domain router for OVM", h.getHypervisorType(), cv); return h.getHypervisorType(); } } @@ -790,8 +787,9 @@ public class NetworkHelperImpl implements NetworkHelper { && _ipAddressDao.findByIpAndSourceNetworkId(guestNetwork.getId(), startIp).getAllocatedTime() == null) { defaultNetworkStartIp = startIp; } else if (logger.isDebugEnabled()) { - logger.debug("First ipv4 " + startIp + " in network id=" + guestNetwork.getId() - + " is already allocated, can't use it for domain router; will get random ip address from the range"); + logger.debug("First ipv4 {} in network {} is already allocated, " + + "can't use it for domain router; will get random ip " + + "address from the range", startIp, guestNetwork); } } } @@ -812,8 +810,9 @@ public class NetworkHelperImpl implements NetworkHelper { if (startIpv6 != null && _ipv6Dao.findByNetworkIdAndIp(guestNetwork.getId(), startIpv6) == null) { defaultNetworkStartIpv6 = startIpv6; } else if (logger.isDebugEnabled()) { - logger.debug("First ipv6 " + startIpv6 + " in network id=" + guestNetwork.getId() - + " is already allocated, can't use it for domain router; will get random ipv6 address from the range"); + logger.debug("First ipv6 {} in network {} is already allocated, " + + "can't use it for domain router; will get random ipv6 " + + "address from the range", startIpv6, guestNetwork); } } } @@ -901,10 +900,10 @@ public class NetworkHelperImpl implements NetworkHelper { } } if (expire != null && !containsOnlyNumbers(expire, timeEndChar)) { - throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + " Cause: expire is not in timeformat: " + expire); + throw new InvalidParameterValueException(String.format("Failed LB in validation rule: %s Cause: expire is not in timeformat: %s", rule.getLb(), expire)); } if (tablesize != null && 
!containsOnlyNumbers(tablesize, "kmg")) { - throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + " Cause: tablesize is not in size format: " + tablesize); + throw new InvalidParameterValueException(String.format("Failed LB in validation rule: %s Cause: tablesize is not in size format: %s", rule.getLb(), tablesize)); } } else if (LbStickinessMethod.StickinessMethodType.AppCookieBased.getName().equalsIgnoreCase(stickinessPolicy.getMethodName())) { @@ -923,10 +922,10 @@ public class NetworkHelperImpl implements NetworkHelper { } if (length != null && !containsOnlyNumbers(length, null)) { - throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + " Cause: length is not a number: " + length); + throw new InvalidParameterValueException(String.format("Failed LB in validation rule: %s Cause: length is not a number: %s", rule.getLb(), length)); } if (holdTime != null && !containsOnlyNumbers(holdTime, timeEndChar) && !containsOnlyNumbers(holdTime, null)) { - throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + " Cause: holdtime is not in timeformat: " + holdTime); + throw new InvalidParameterValueException(String.format("Failed LB in validation rule: %s Cause: holdtime is not in timeformat: %s", rule.getLb(), holdTime)); } } } diff --git a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index 8a28eeabe21..e171b68399b 100644 --- a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -394,7 +394,7 @@ Configurable, StateListener parseHealthCheckResults( - final Map>> checksJson, final long routerId) { - final Map> checksInDb = getHealthChecksFromDb(routerId); + final Map>> checksJson, final DomainRouterVO 
router) { + final Map> checksInDb = getHealthChecksFromDb(router.getId()); List healthChecks = new ArrayList<>(); final String lastRunKey = "lastRun"; for (String checkType : checksJson.keySet()) { @@ -1420,28 +1428,27 @@ Configurable, StateListener updateDbHealthChecksFromRouterResponse(final long routerId, final String monitoringResult) { + private List updateDbHealthChecksFromRouterResponse(final DomainRouterVO router, final String monitoringResult) { if (StringUtils.isBlank(monitoringResult)) { - logger.warn("Attempted parsing empty monitoring results string for router " + routerId); + logger.warn("Attempted parsing empty monitoring results string for router {}", router); return Collections.emptyList(); } try { - logger.debug("Parsing and updating DB health check data for router: " + routerId + " with data: " + monitoringResult) ; + logger.debug("Parsing and updating DB health check data for router: {} with data: {}", router, monitoringResult); final Type t = new TypeToken>>>() {}.getType(); final Map>> checks = GsonHelper.getGson().fromJson(monitoringResult, t); - return parseHealthCheckResults(checks, routerId); + return parseHealthCheckResults(checks, router); } catch (JsonSyntaxException ex) { logger.error("Unable to parse the result of health checks due to " + ex.getLocalizedMessage(), ex); } @@ -2667,7 +2674,7 @@ Configurable, StateListener routers) throws ResourceUnavailableException { if (routers == null || routers.isEmpty()) { logger.warn("Failed to add/remove VPN users: no router found for account and zone"); - throw new ResourceUnavailableException("Unable to assign ip addresses, domR doesn't exist for network " + network.getId(), DataCenter.class, network.getDataCenterId()); + throw new ResourceUnavailableException(String.format("Unable to assign ip addresses, domR doesn't exist for network %s", network), DataCenter.class, network.getDataCenterId()); } for (final DomainRouterVO router : routers) { @@ -3024,8 +3032,9 @@ Configurable, StateListener 
_accountMgr.getAccount(router.getAccountId())); return; } @@ -3251,8 +3259,8 @@ Configurable, StateListener jobIds = new ArrayList(); for (final DomainRouterVO router : routers) { if (!_nwHelper.checkRouterTemplateVersion(router)) { - logger.debug("Upgrading template for router: " + router.getId()); + logger.debug("Upgrading template for router: {}", router); final Map params = new HashMap(); params.put("ctxUserId", "1"); params.put("ctxAccountId", "" + router.getAccountId()); @@ -3349,7 +3357,7 @@ Configurable, StateListener networkACLs = _networkACLMgr.listNetworkACLItems(guestNetworkId); if (networkACLs != null && !networkACLs.isEmpty()) { - logger.debug("Found " + networkACLs.size() + " network ACLs to apply as a part of VPC VR " + domainRouterVO + " start for guest network id=" + guestNetworkId); + logger.debug("Found {} network ACLs to apply as a part of VPC VR {} start for guest network {}", networkACLs.size(), domainRouterVO, _networkModel.getNetwork(guestNetworkId)); _commandSetupHelper.createNetworkACLsCommands(networkACLs, domainRouterVO, cmds, guestNetworkId, false); } } @@ -920,18 +920,18 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian Answer answer = cmds.getAnswer("users"); if (answer == null || !answer.getResult()) { String errorMessage = (answer == null) ? 
"null answer object" : answer.getDetails(); - logger.error("Unable to start vpn: unable add users to vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " - + router.getInstanceName() + " due to " + errorMessage); - throw new ResourceUnavailableException("Unable to start vpn: Unable to add users to vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() - + " on domR: " + router.getInstanceName() + " due to " + errorMessage, DataCenter.class, router.getDataCenterId()); + DataCenter zone = _entityMgr.findById(DataCenter.class, router.getDataCenterId()); + Account account = _entityMgr.findById(Account.class, vpn.getAccountId()); + logger.error("Unable to start vpn: unable add users to vpn in zone {} for account {} on domR: {} due to {}", zone, account, router, errorMessage); + throw new ResourceUnavailableException(String.format("Unable to start vpn: Unable to add users to vpn in zone %s for account %s on domR: %s due to %s", zone, account, router.getInstanceName(), errorMessage), DataCenter.class, router.getDataCenterId()); } answer = cmds.getAnswer("startVpn"); if (answer == null || !answer.getResult()) { String errorMessage = (answer == null) ? 
"null answer object" : answer.getDetails(); - logger.error("Unable to start vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to " - + errorMessage); - throw new ResourceUnavailableException("Unable to start vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " - + router.getInstanceName() + " due to " + errorMessage, DataCenter.class, router.getDataCenterId()); + DataCenter zone = _entityMgr.findById(DataCenter.class, router.getDataCenterId()); + Account account = _entityMgr.findById(Account.class, vpn.getAccountId()); + logger.error("Unable to start vpn in zone {} for account {} on domR: {} due to {}", zone, account, router, errorMessage); + throw new ResourceUnavailableException(String.format("Unable to start vpn in zone %s for account %s on domR: %s due to %s", zone, account, router.getInstanceName(), errorMessage), DataCenter.class, router.getDataCenterId()); } return true; diff --git a/server/src/main/java/com/cloud/network/rules/DhcpSubNetRules.java b/server/src/main/java/com/cloud/network/rules/DhcpSubNetRules.java index ccf8f188471..b406d9623a2 100644 --- a/server/src/main/java/com/cloud/network/rules/DhcpSubNetRules.java +++ b/server/src/main/java/com/cloud/network/rules/DhcpSubNetRules.java @@ -131,7 +131,7 @@ public class DhcpSubNetRules extends RuleApplier { } } catch (final InsufficientAddressCapacityException e) { logger.info(e.getMessage()); - logger.info("unable to configure dhcp for this VM."); + logger.info("unable to configure dhcp for this VM {}", vm); return false; } // this means we did not create an IP alias on the router. 
diff --git a/server/src/main/java/com/cloud/network/rules/PrivateGatewayRules.java b/server/src/main/java/com/cloud/network/rules/PrivateGatewayRules.java index bb66839fb13..1b827b384d0 100644 --- a/server/src/main/java/com/cloud/network/rules/PrivateGatewayRules.java +++ b/server/src/main/java/com/cloud/network/rules/PrivateGatewayRules.java @@ -60,7 +60,7 @@ public class PrivateGatewayRules extends RuleApplier { final NetworkHelper networkHelper = visitor.getVirtualNetworkApplianceFactory().getNetworkHelper(); if (!networkHelper.checkRouterVersion(_router)) { - logger.warn("Router requires upgrade. Unable to send command to router: " + _router.getId()); + logger.warn("Router requires upgrade. Unable to send command to router: {}", _router); return false; } final VirtualMachineManager itMgr = visitor.getVirtualNetworkApplianceFactory().getItMgr(); diff --git a/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java b/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java index 404c1c88f5a..bfcaca72b31 100644 --- a/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java +++ b/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java @@ -167,7 +167,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules if (userVm.getState() == VirtualMachine.State.Destroyed || userVm.getState() == VirtualMachine.State.Expunging) { if (!ignoreVmState) { - throw new InvalidParameterValueException("Invalid user vm: " + userVm.getId()); + throw new InvalidParameterValueException(String.format("Invalid user vm: %s", userVm)); } } @@ -193,7 +193,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules _accountMgr.checkAccess(caller, null, false, rule, userVm); if (userVm.getState() == VirtualMachine.State.Destroyed || userVm.getState() == VirtualMachine.State.Expunging) { - throw new InvalidParameterValueException("Invalid user vm: " + userVm.getId()); + throw new 
InvalidParameterValueException(String.format("Invalid user vm: %s", userVm)); } } @@ -214,7 +214,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules if (ipAddress == null) { throw new InvalidParameterValueException("Unable to create port forwarding rule; ip id=" + ipAddrId + " doesn't exist in the system"); } else if (ipAddress.isOneToOneNat()) { - throw new InvalidParameterValueException("Unable to create port forwarding rule; ip id=" + ipAddrId + " has static nat enabled"); + throw new InvalidParameterValueException(String.format("Unable to create port forwarding rule; ip %s has static nat enabled", ipAddress)); } final Long networkId = rule.getNetworkId(); @@ -227,7 +227,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules if (assignToVpcNtwk) { _networkModel.checkIpForService(ipAddress, Service.PortForwarding, networkId); - logger.debug("The ip is not associated with the VPC network id=" + networkId + ", so assigning"); + logger.debug("The ip is not associated with the VPC network {}, so assigning", network); try { ipAddress = _ipAddrMgr.associateIPToGuestNetwork(ipAddrId, networkId, false); performedIpAssoc = true; @@ -267,7 +267,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules throw new InvalidParameterValueException("Unable to create port forwarding rule on address " + ipAddress + ", invalid virtual machine id specified (" + vmId + ")."); } else if (vm.getState() == VirtualMachine.State.Destroyed || vm.getState() == VirtualMachine.State.Expunging) { - throw new InvalidParameterValueException("Invalid user vm: " + vm.getId()); + throw new InvalidParameterValueException(String.format("Invalid user vm: %s", vm)); } // Verify that vm has nic in the network @@ -345,15 +345,15 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules throw (NetworkRuleConflictException)e; } - throw new CloudRuntimeException("Unable to add rule for the 
ip id=" + ipAddrId, e); + throw new CloudRuntimeException(String.format("Unable to add rule for the ip %s", ipAddressFinal), e); } }); } finally { // release ip address if ipassoc was perfored if (performedIpAssoc) { //if the rule is the last one for the ip address assigned to VPC, unassign it from the network - IpAddress ip = _ipAddressDao.findById(ipAddress.getId()); - _vpcMgr.unassignIPFromVpcNetwork(ip.getId(), networkId); + IPAddressVO ip = _ipAddressDao.findById(ipAddress.getId()); + _vpcMgr.unassignIPFromVpcNetwork(ip, network); } } } finally { @@ -462,9 +462,9 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules Nic guestNic; NicSecondaryIpVO nicSecIp = null; String dstIp = null; + Network network = _networkModel.getNetwork(networkId); try { - Network network = _networkModel.getNetwork(networkId); if (network == null) { throw new InvalidParameterValueException("Unable to find network by id"); } @@ -483,8 +483,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules if (!isSystemVm) { UserVmVO vm = _vmDao.findById(vmId); if (vm == null) { - throw new InvalidParameterValueException("Can't enable static nat for the address id=" + ipId + ", invalid virtual machine id specified (" + vmId + - ")."); + throw new InvalidParameterValueException(String.format("Can't enable static nat for the address %s, invalid virtual machine id specified (%d).", ipAddress, vmId)); } //associate ip address to network (if needed) if (ipAddress.getAssociatedWithNetworkId() == null) { @@ -492,17 +491,16 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules if (assignToVpcNtwk) { _networkModel.checkIpForService(ipAddress, Service.StaticNat, networkId); - logger.debug("The ip is not associated with the VPC network id=" + networkId + ", so assigning"); + logger.debug("The ip is not associated with the VPC network {}, so assigning", network); try { ipAddress = 
_ipAddrMgr.associateIPToGuestNetwork(ipId, networkId, false); performedIpAssoc = true; } catch (Exception ex) { - logger.warn("Failed to associate ip id=" + ipId + " to VPC network id=" + networkId + " as " + "a part of enable static nat"); + logger.warn("Failed to associate ip {} to VPC network {} as a part of enable static nat", ipAddress, network); return false; } } else if (ipAddress.isPortable()) { - logger.info("Portable IP " + ipAddress.getUuid() + " is not associated with the network yet " + " so associate IP with the network " + - networkId); + logger.info("Portable IP {} is not associated with the network yet so associate IP with the network {}", ipAddress, network); try { // check if StaticNat service is enabled in the network _networkModel.checkIpForService(ipAddress, Service.StaticNat, networkId); @@ -515,7 +513,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules // associate portable IP with guest network ipAddress = _ipAddrMgr.associatePortableIPToGuestNetwork(ipId, networkId, false); } catch (Exception e) { - logger.warn("Failed to associate portable id=" + ipId + " to network id=" + networkId + " as " + "a part of enable static nat"); + logger.warn("Failed to associate portable {} to network {} as a part of enable static nat", ipAddress, network); return false; } } @@ -531,16 +529,15 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules _ipAddrMgr.transferPortableIP(ipId, ipAddress.getAssociatedWithNetworkId(), networkId); ipAddress = _ipAddressDao.findById(ipId); } catch (Exception e) { - logger.warn("Failed to associate portable id=" + ipId + " to network id=" + networkId + " as " + "a part of enable static nat"); + logger.warn("Failed to associate portable {} to network {} as a part of enable static nat", ipAddress, network); return false; } } else { - throw new InvalidParameterValueException("Portable IP: " + ipId + " has associated services " + "in network " + - 
ipAddress.getAssociatedWithNetworkId() + " so can not be transferred to " + " network " + networkId); + throw new InvalidParameterValueException(String.format("Portable IP: %s has associated services in network %s so can not be transferred to network %s", + ipAddress, _networkModel.getNetwork(ipAddress.getAssociatedWithNetworkId()), network)); } } else { - throw new InvalidParameterValueException("Invalid network Id=" + networkId + ". IP is associated with" + - " a different network than passed network id"); + throw new InvalidParameterValueException(String.format("Invalid network %s. IP is associated with a different network than passed network id", network)); } } else { _networkModel.checkIpForService(ipAddress, Service.StaticNat, null); @@ -592,13 +589,14 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules ipAddress.setVmIp(dstIp); if (_ipAddressDao.update(ipAddress.getId(), ipAddress)) { // enable static nat on the backend - logger.trace("Enabling static nat for ip address " + ipAddress + " and vm id=" + vmId + " on the backend"); + logger.trace("Enabling static nat for ip address {} and vm {} on the backend", + ipAddress::toString, () -> _vmInstanceDao.findById(vmId)); if (applyStaticNatForIp(ipId, false, caller, false)) { applyUserDataIfNeeded(vmId, network, guestNic); performedIpAssoc = false; // ignor unassignIPFromVpcNetwork in finally block return true; } else { - logger.warn("Failed to enable static nat rule for ip address " + ipId + " on the backend"); + logger.warn("Failed to enable static nat rule for ip address {} on the backend", ipAddress); ipAddress.setOneToOneNat(isOneToOneNat); ipAddress.setAssociatedWithVmId(associatedWithVmId); ipAddress.setVmIp(null); @@ -609,10 +607,10 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules } } finally { - if (performedIpAssoc) { - //if the rule is the last one for the ip address assigned to VPC, unassign it from the network - IpAddress ip = 
_ipAddressDao.findById(ipAddress.getId()); - _vpcMgr.unassignIPFromVpcNetwork(ip.getId(), networkId); + if (performedIpAssoc) { + //if the rule is the last one for the ip address assigned to VPC, unassign it from the network + IPAddressVO ip = _ipAddressDao.findById(ipAddress.getId()); + _vpcMgr.unassignIPFromVpcNetwork(ip, network); } } return false; @@ -782,7 +780,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules Set ipsToReprogram = new HashSet(); if (rules == null || rules.isEmpty()) { - logger.debug("No port forwarding rules are found for vm id=" + vmId); + logger.debug("No port forwarding rules are found for vm {}", vm); return true; } @@ -977,7 +975,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules List staticNatRules = new ArrayList(); if (rules.size() == 0) { - logger.debug("There are no static nat rules to apply for network id=" + networkId); + logger.debug("There are no static nat rules to apply for network {}", _networkModel.getNetwork(networkId)); return true; } @@ -1002,10 +1000,10 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules } @Override - public boolean applyStaticNatsForNetwork(long networkId, boolean continueOnError, Account caller) { - List ips = _ipAddressDao.listStaticNatPublicIps(networkId); + public boolean applyStaticNatsForNetwork(Network network, boolean continueOnError, Account caller) { + List ips = _ipAddressDao.listStaticNatPublicIps(network.getId()); if (ips.isEmpty()) { - logger.debug("There are no static nat to apply for network id=" + networkId); + logger.debug("There are no static nat to apply for network {}", network); return true; } @@ -1017,7 +1015,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules for (IPAddressVO ip : ips) { // Get nic IP4 address //String dstIp = _networkModel.getIpInNetwork(ip.getAssociatedWithVmId(), networkId); - StaticNatImpl staticNat = new 
StaticNatImpl(ip.getAllocatedToAccountId(), ip.getAllocatedInDomainId(), networkId, ip.getId(), ip.getVmIp(), false); + StaticNatImpl staticNat = new StaticNatImpl(ip.getAllocatedToAccountId(), ip.getAllocatedInDomainId(), network.getId(), ip.getId(), ip.getVmIp(), false); staticNats.add(staticNat); } @@ -1159,7 +1157,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules rules.addAll(_firewallDao.listByIpAndPurposeAndNotRevoked(ipId, Purpose.StaticNat)); if (logger.isDebugEnabled() && success) { - logger.debug("Successfully released rules for ip id=" + ipId + " and # of rules now = " + rules.size()); + logger.debug("Successfully released rules for ip {} and # of rules now = {}", ipAddress, rules.size()); } return (rules.size() == 0 && success); @@ -1318,13 +1316,13 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules // Revoke all firewall rules for the ip try { - logger.debug("Revoking all " + Purpose.Firewall + "rules as a part of disabling static nat for public IP id=" + ipId); - if (!_firewallMgr.revokeFirewallRulesForIp(ipId, callerUserId, caller)) { - logger.warn("Unable to revoke all the firewall rules for ip id=" + ipId + " as a part of disable statis nat"); + logger.debug(String.format("Revoking all %s rules as a part of disabling static nat for public IP %s", Purpose.Firewall, ipAddress)); + if (!_firewallMgr.revokeFirewallRulesForIp(ipAddress, callerUserId, caller)) { + logger.warn("Unable to revoke all the firewall rules for ip {} as a part of disable statis nat", ipAddress); success = false; } } catch (ResourceUnavailableException e) { - logger.warn("Unable to revoke all firewall rules for ip id=" + ipId + " as a part of ip release", e); + logger.warn("Unable to revoke all firewall rules for ip {} as a part of ip release", ipAddress, e); success = false; } @@ -1352,7 +1350,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules return true; } else { - 
logger.warn("Failed to disable one to one nat for the ip address id" + ipId); + logger.warn("Failed to disable one to one nat for the ip address {}", ipAddress); ipAddress = _ipAddressDao.findById(ipId); ipAddress.setRuleState(null); _ipAddressDao.update(ipAddress.getId(), ipAddress); @@ -1401,8 +1399,8 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules } @Override - public boolean applyStaticNatForNetwork(long networkId, boolean continueOnError, Account caller, boolean forRevoke) { - List staticNatIps = _ipAddressDao.listStaticNatPublicIps(networkId); + public boolean applyStaticNatForNetwork(Network network, boolean continueOnError, Account caller, boolean forRevoke) { + List staticNatIps = _ipAddressDao.listStaticNatPublicIps(network.getId()); List staticNats = new ArrayList(); for (IpAddress staticNatIp : staticNatIps) { @@ -1411,7 +1409,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules if (staticNats != null && !staticNats.isEmpty()) { if (forRevoke) { - logger.debug("Found " + staticNats.size() + " static nats to disable for network id " + networkId); + logger.debug("Found {} static nats to disable for network {}", staticNats.size(), network); } try { if (!_ipAddrMgr.applyStaticNats(staticNats, continueOnError, forRevoke)) { @@ -1422,7 +1420,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules return false; } } else { - logger.debug("Found 0 static nat rules to apply for network id " + networkId); + logger.debug("Found 0 static nat rules to apply for network id {}", network); } return true; @@ -1536,19 +1534,20 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules @Override public List listAssociatedRulesForGuestNic(Nic nic) { - logger.debug("Checking if PF/StaticNat/LoadBalancer rules are configured for nic " + nic.getId()); + logger.debug("Checking if PF/StaticNat/LoadBalancer rules are configured for nic {}", nic); List result 
= new ArrayList(); // add PF rules result.addAll(_portForwardingDao.listByNetworkAndDestIpAddr(nic.getIPv4Address(), nic.getNetworkId())); if(result.size() > 0) { - logger.debug("Found " + result.size() + " portforwarding rule configured for the nic in the network " + nic.getNetworkId()); + logger.debug("Found {} portforwarding rule configured for the nic in the network {}", + result.size(), _networkModel.getNetwork(nic.getNetworkId())); } // add static NAT rules List staticNatRules = _firewallDao.listStaticNatByVmId(nic.getInstanceId()); for (FirewallRuleVO rule : staticNatRules) { if (rule.getNetworkId() == nic.getNetworkId()) { result.add(rule); - logger.debug("Found rule " + rule.getId() + " " + rule.getPurpose() + " configured"); + logger.debug("Found rule {} configured", rule); } } List staticNatIps = _ipAddressDao.listStaticNatPublicIps(nic.getNetworkId()); @@ -1561,7 +1560,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules new FirewallRuleVO(null, ip.getId(), 0, 65535, NetUtils.ALL_PROTO.toString(), nic.getNetworkId(), vm.getAccountId(), vm.getDomainId(), Purpose.StaticNat, null, null, null, null, null); result.add(staticNatRule); - logger.debug("Found rule " + staticNatRule.getId() + " " + staticNatRule.getPurpose() + " configured"); + logger.debug("Found rule {} configured", staticNatRule); } } // add LB rules @@ -1570,7 +1569,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules FirewallRuleVO lbRule = _firewallDao.findById(lb.getLoadBalancerId()); if (lbRule.getNetworkId() == nic.getNetworkId()) { result.add(lbRule); - logger.debug("Found rule " + lbRule.getId() + " " + lbRule.getPurpose() + " configured"); + logger.debug("Found rule {} configured", lbRule); } } return result; @@ -1663,10 +1662,10 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules rules.add(rule); try { if (!_firewallMgr.applyRules(rules, true, false)) { - throw new 
CloudRuntimeException("Failed to revoke the existing port forwarding rule:" + id); + throw new CloudRuntimeException(String.format("Failed to revoke the existing port forwarding rule:%s", rule)); } } catch (ResourceUnavailableException ex) { - throw new CloudRuntimeException("Failed to revoke the existing port forwarding rule:" + id + " due to ", ex); + throw new CloudRuntimeException(String.format("Failed to revoke the existing port forwarding rule:%s due to ", rule), ex); } rule = _portForwardingDao.findById(id); @@ -1692,7 +1691,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules //apply new rules if (!applyPortForwardingRules(rule.getSourceIpAddressId(), false, caller)) { - throw new CloudRuntimeException("Failed to apply the new port forwarding rule:" + id); + throw new CloudRuntimeException(String.format("Failed to apply the new port forwarding rule: %s", rule)); } return _portForwardingDao.findById(id); diff --git a/server/src/main/java/com/cloud/network/rules/VpcIpAssociationRules.java b/server/src/main/java/com/cloud/network/rules/VpcIpAssociationRules.java index c196a27bf32..5ae102acf43 100644 --- a/server/src/main/java/com/cloud/network/rules/VpcIpAssociationRules.java +++ b/server/src/main/java/com/cloud/network/rules/VpcIpAssociationRules.java @@ -22,6 +22,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import com.cloud.network.dao.NetworkDao; import org.apache.cloudstack.network.topology.NetworkTopologyVisitor; import com.cloud.exception.ResourceUnavailableException; @@ -56,6 +57,7 @@ public class VpcIpAssociationRules extends RuleApplier { _ipsToSend = new ArrayList(); NicDao nicDao = visitor.getVirtualNetworkApplianceFactory().getNicDao(); + NetworkDao networkDao = visitor.getVirtualNetworkApplianceFactory().getNetworkDao(); for (PublicIpAddress ipAddr : _ipAddresses) { String broadcastURI = BroadcastDomainType.Vlan.toUri(ipAddr.getVlanTag()).toString(); Nic nic = 
nicDao.findByNetworkIdInstanceIdAndBroadcastUri(ipAddr.getNetworkId(), _router.getId(), broadcastURI); @@ -63,7 +65,7 @@ public class VpcIpAssociationRules extends RuleApplier { String macAddress = null; if (nic == null) { if (ipAddr.getState() != IpAddress.State.Releasing) { - throw new CloudRuntimeException("Unable to find the nic in network " + ipAddr.getNetworkId() + " to apply the ip address " + ipAddr + " for"); + throw new CloudRuntimeException(String.format("Unable to find the nic in network %s to apply the ip address %s for", networkDao.findById(ipAddr.getNetworkId()), ipAddr)); } logger.debug("Not sending release for ip address " + ipAddr + " as its nic is already gone from VPC router " + _router); } else { diff --git a/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java b/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java index b925137c4ce..067f2fbdbb2 100644 --- a/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java +++ b/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java @@ -81,23 +81,26 @@ public class SecurityGroupListener implements Listener { @Override public boolean processAnswers(long agentId, long seq, Answer[] answers) { + return processAnswers(agentId, null, null, seq, answers); + } + + @Override + public boolean processAnswers(long agentId, String uuid, String name, long seq, Answer[] answers) { List affectedVms = new ArrayList(); for (Answer ans : answers) { if (ans instanceof SecurityGroupRuleAnswer) { SecurityGroupRuleAnswer ruleAnswer = (SecurityGroupRuleAnswer)ans; if (ans.getResult()) { - logger.debug("Successfully programmed rule " + ruleAnswer.toString() + " into host " + agentId); + logger.debug("Successfully programmed rule {} into host [id: {}, uuid: {}, name: {}]", ruleAnswer.toString(), agentId, uuid, name); _workDao.updateStep(ruleAnswer.getVmId(), ruleAnswer.getLogSequenceNumber(), Step.Done); recordSuccess(ruleAnswer.getVmId()); } else 
{ _workDao.updateStep(ruleAnswer.getVmId(), ruleAnswer.getLogSequenceNumber(), Step.Error); ; - logger.debug("Failed to program rule " + ruleAnswer.toString() + " into host " + agentId + " due to " + ruleAnswer.getDetails() + - " and updated jobs"); + logger.debug("Failed to program rule {} into host [id: {}, uuid: {}, name: {}] due to {} and updated jobs", ruleAnswer.toString(), agentId, uuid, name, ruleAnswer.getDetails()); if (ruleAnswer.getReason() == FailureReason.CANNOT_BRIDGE_FIREWALL) { - logger.debug("Not retrying security group rules for vm " + ruleAnswer.getVmId() + " on failure since host " + agentId + - " cannot do bridge firewalling"); + logger.debug("Not retrying security group rules for vm {} on failure since host [id: {}, uuid: {}, name: {}] cannot do bridge firewalling", ruleAnswer.getVmId(), agentId, uuid, name); } else if (ruleAnswer.getReason() == FailureReason.PROGRAMMING_FAILED) { if (checkShouldRetryOnFailure(ruleAnswer.getVmId())) { logger.debug("Retrying security group rules on failure for vm " + ruleAnswer.getVmId()); @@ -172,7 +175,7 @@ public class SecurityGroupListener implements Listener { logger.info("Scheduled network rules cleanup, interval=" + cleanupCmd.getInterval()); } catch (AgentUnavailableException e) { //usually hypervisors that do not understand sec group rules. 
- logger.debug("Unable to schedule network rules cleanup for host " + host.getId(), e); + logger.debug("Unable to schedule network rules cleanup for host {}", host, e); } if (_workTracker != null) { _workTracker.processConnect(host.getId()); diff --git a/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl.java b/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl.java index fd5bd448089..dc408602c93 100644 --- a/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl.java +++ b/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl.java @@ -37,6 +37,7 @@ import java.util.concurrent.TimeUnit; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.host.dao.HostDao; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.command.user.securitygroup.AuthorizeSecurityGroupEgressCmd; import org.apache.cloudstack.api.command.user.securitygroup.AuthorizeSecurityGroupIngressCmd; @@ -154,6 +155,8 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro @Inject VMInstanceDao _vmDao; @Inject + HostDao hostDao; + @Inject NetworkOrchestrationService _networkMgr; @Inject NetworkModel _networkModel; @@ -758,7 +761,7 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro // Prevents other threads/management servers from creating duplicate security rules SecurityGroup securityGroup = _securityGroupDao.acquireInLockTable(securityGroupId); if (securityGroup == null) { - logger.warn("Could not acquire lock on network security group: id= " + securityGroupId); + logger.warn("Could not acquire lock on network security group: {}", securityGroup); return null; } List newRules = new ArrayList(); @@ -769,14 +772,14 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro if (ngVO.getId() != securityGroup.getId()) { final SecurityGroupVO tmpGrp = 
_securityGroupDao.lockRow(ngId, false); if (tmpGrp == null) { - logger.warn("Failed to acquire lock on security group: " + ngId); - throw new CloudRuntimeException("Failed to acquire lock on security group: " + ngId); + logger.warn("Failed to acquire lock on security group: {}", ngVO); + throw new CloudRuntimeException(String.format("Failed to acquire lock on security group: %s", ngVO)); } } SecurityGroupRuleVO securityGroupRule = _securityGroupRuleDao.findByProtoPortsAndAllowedGroupId(securityGroup.getId(), protocolFinal, startPortOrTypeFinal, endPortOrCodeFinal, ngVO.getId()); if ((securityGroupRule != null) && (securityGroupRule.getRuleType() == ruleType)) { - logger.warn("The rule already exists. id= " + securityGroupRule.getUuid()); + logger.warn("The rule {} already exists.", securityGroupRule); continue; // rule already exists. } securityGroupRule = new SecurityGroupRuleVO(ruleType, securityGroup.getId(), startPortOrTypeFinal, endPortOrCodeFinal, protocolFinal, ngVO.getId()); @@ -796,7 +799,7 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro } } if (logger.isDebugEnabled()) { - logger.debug("Added " + newRules.size() + " rules to security group " + securityGroup.getName()); + logger.debug("Added {} rules to security group {}", newRules.size(), securityGroup); } return newRules; } catch (Exception e) { @@ -852,8 +855,8 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro // check type if (type != rule.getRuleType()) { - logger.debug("Mismatch in rule type for security rule with id " + id); - throw new InvalidParameterValueException("Mismatch in rule type for security rule with id " + id); + logger.debug("Mismatch in rule type for security rule {}", rule); + throw new InvalidParameterValueException(String.format("Mismatch in rule type for security rule %s", rule)); } // Check permissions @@ -870,12 +873,12 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro // 
acquire lock on parent group (preserving this logic) groupHandle = _securityGroupDao.acquireInLockTable(rule.getSecurityGroupId()); if (groupHandle == null) { - logger.warn("Could not acquire lock on security group id: " + rule.getSecurityGroupId()); + logger.warn("Could not acquire lock on security group: {}", securityGroup); return false; } _securityGroupRuleDao.remove(id); - logger.debug("revokeSecurityGroupRule succeeded for security rule id: " + id); + logger.debug("revokeSecurityGroupRule succeeded for security rule: {}", rule); return true; } catch (Exception e) { @@ -928,9 +931,9 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro if (group == null) { group = new SecurityGroupVO(name, description, domainId, accountId); group = _securityGroupDao.persist(group); - logger.debug("Created security group " + group + " for account id=" + accountId); + logger.debug("Created security group {} for account [id: {}, name: {}]", group, accountId, accountName); } else { - logger.debug("Returning existing security group " + group + " for account id=" + accountId); + logger.debug("Returning existing security group {} for account [id: {}, name: {}]", group, accountId, accountName); } return group; @@ -1032,14 +1035,14 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro locked = true; return; } - logger.warn("Unable to acquire lock on vm id=" + userVmId); + logger.warn("Unable to acquire lock on vm {}", vm); return; } locked = true; Long agentId = null; VmRulesetLogVO log = _rulesetLogDao.findByVmId(userVmId); if (log == null) { - logger.warn("Cannot find log record for vm id=" + userVmId); + logger.warn("Cannot find log record for vm {}", vm); return; } seqnum = log.getLogsequence(); @@ -1066,7 +1069,9 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro try { _agentMgr.send(agentId, cmds, _answerListener); } catch (AgentUnavailableException e) { - logger.debug("Unable to send 
ingress rules updates for vm: " + userVmId + "(agentid=" + agentId + ")"); + Long finalAgentId = agentId; + logger.debug("Unable to send ingress rules updates for vm: {} (agent={})", + vm::toString, () -> hostDao.findByIdIncludingRemoved(finalAgentId)); _workDao.updateStep(work.getInstanceId(), seqnum, Step.Done); } @@ -1085,9 +1090,10 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro @Override @DB - public boolean addInstanceToGroups(final Long userVmId, final List groups) { + public boolean addInstanceToGroups(final UserVm userVm, final List groups) { + long userVmId = userVm.getId(); if (!isVmSecurityGroupEnabled(userVmId)) { - logger.trace("User vm " + userVmId + " is not security group enabled, not adding it to security group"); + logger.trace("User vm {} is not security group enabled, not adding it to security group", userVm); return false; } if (groups != null && !groups.isEmpty()) { @@ -1102,16 +1108,15 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro final Set uniqueGroups = new TreeSet(new SecurityGroupVOComparator()); uniqueGroups.addAll(sgs); if (userVm == null) { - logger.warn("Failed to acquire lock on user vm id=" + userVmId); + logger.warn("Failed to acquire lock on user vm {}", userVm); } try { for (SecurityGroupVO securityGroup : uniqueGroups) { // don't let the group be deleted from under us. 
SecurityGroupVO ngrpLock = _securityGroupDao.lockRow(securityGroup.getId(), false); if (ngrpLock == null) { - logger.warn("Failed to acquire lock on network group id=" + securityGroup.getId() + " name=" + securityGroup.getName()); - throw new ConcurrentModificationException("Failed to acquire lock on network group id=" + securityGroup.getId() + " name=" - + securityGroup.getName()); + logger.warn("Failed to acquire lock on network group {}", securityGroup); + throw new ConcurrentModificationException(String.format("Failed to acquire lock on network group %s", securityGroup)); } if (_securityGroupVMMapDao.findByVmIdGroupId(userVmId, securityGroup.getId()) == null) { SecurityGroupVMMapVO groupVmMapVO = new SecurityGroupVMMapVO(securityGroup.getId(), userVmId); @@ -1133,9 +1138,10 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro @Override @DB - public void removeInstanceFromGroups(final long userVmId) { + public void removeInstanceFromGroups(final UserVm vm) { + long userVmId = vm.getId(); if (_securityGroupVMMapDao.countSGForVm(userVmId) < 1) { - logger.trace("No security groups found for vm id=" + userVmId + ", returning"); + logger.trace("No security groups found for vm {}, returning", vm); return; } Transaction.execute(new TransactionCallbackNoReturn() { @@ -1144,14 +1150,14 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro UserVm userVm = _userVMDao.acquireInLockTable(userVmId); // ensures that duplicate entries are not created in // addInstance if (userVm == null) { - logger.warn("Failed to acquire lock on user vm id=" + userVmId); + logger.warn("Failed to acquire lock on user vm {}", vm); } int n = _securityGroupVMMapDao.deleteVM(userVmId); - logger.info("Disassociated " + n + " network groups " + " from uservm " + userVmId); + logger.info("Disassociated {} network groups from uservm {}", n, vm); _userVMDao.releaseFromLockTable(userVmId); } }); - logger.debug("Security group mappings are 
removed successfully for vm id=" + userVmId); + logger.debug("Security group mappings are removed successfully for vm {}", vm); } @DB @@ -1168,7 +1174,7 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro } if (newName == null) { - logger.debug("security group name is not changed. id=" + groupId); + logger.debug("security group [{}] name is not changed.", group); return group; } @@ -1188,7 +1194,7 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro } if (newName.equals(group.getName())) { - logger.debug("security group name is not changed. id=" + groupId); + logger.debug("security group [{}] name is not changed.", group); return group; } else if (newName.equalsIgnoreCase(SecurityGroupManager.DEFAULT_GROUP_NAME)) { throw new InvalidParameterValueException("The security group name " + SecurityGroupManager.DEFAULT_GROUP_NAME + " is reserved"); @@ -1201,7 +1207,7 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro group.setName(newName); _securityGroupDao.update(groupId, group); - logger.debug("Updated security group id=" + groupId); + logger.debug("Updated security group {}", group); return group; } @@ -1226,12 +1232,12 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro boolean result = Transaction.execute(new TransactionCallbackWithException() { @Override public Boolean doInTransaction(TransactionStatus status) throws ResourceInUseException { - SecurityGroupVO group = _securityGroupDao.lockRow(groupId, true); - if (group == null) { - throw new InvalidParameterValueException("Unable to find security group by id " + groupId); + SecurityGroupVO groupLock = _securityGroupDao.lockRow(groupId, true); + if (groupLock == null) { + throw new InvalidParameterValueException(String.format("Unable to get lock on security group %s", group)); } - if (group.getName().equalsIgnoreCase(SecurityGroupManager.DEFAULT_GROUP_NAME)) { + if 
(groupLock.getName().equalsIgnoreCase(SecurityGroupManager.DEFAULT_GROUP_NAME)) { throw new InvalidParameterValueException("The network group default is reserved"); } @@ -1245,7 +1251,7 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro _securityGroupDao.expunge(groupId); - logger.debug("Deleted security group id=" + groupId); + logger.debug("Deleted security group {}", group); return true; } @@ -1362,17 +1368,17 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro Event event = transition.getEvent(); if (VirtualMachine.State.isVmStarted(oldState, event, newState)) { if (logger.isTraceEnabled()) { - logger.trace("Security Group Mgr: handling start of vm id" + vm.getId()); + logger.trace("Security Group Mgr: handling start of vm {}", vm); } handleVmStarted((VMInstanceVO)vm); } else if (VirtualMachine.State.isVmStopped(oldState, event, newState)) { if (logger.isTraceEnabled()) { - logger.trace("Security Group Mgr: handling stop of vm id" + vm.getId()); + logger.trace("Security Group Mgr: handling stop of vm {}", vm); } handleVmStopped((VMInstanceVO)vm); } else if (VirtualMachine.State.isVmMigrated(oldState, event, newState)) { if (logger.isTraceEnabled()) { - logger.trace("Security Group Mgr: handling migration of vm id" + vm.getId()); + logger.trace("Security Group Mgr: handling migration of vm {}", vm); } handleVmMigrated((VMInstanceVO)vm); } @@ -1408,7 +1414,7 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro UserVmVO vm = _userVmMgr.getVirtualMachine(vmId); SecurityGroup defaultGroup = getDefaultSecurityGroup(vm.getAccountId()); if (defaultGroup == null) { - logger.warn("Unable to find default security group for account id=" + vm.getAccountId()); + logger.warn("Unable to find default security group for account {}", () -> _accountMgr.getAccount(vm.getAccountId())); return false; } SecurityGroupVMMapVO map = _securityGroupVMMapDao.findByVmIdGroupId(vmId, 
defaultGroup.getId()); diff --git a/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl2.java b/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl2.java index bd6f0e32bb0..230aa0ec231 100644 --- a/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl2.java +++ b/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl2.java @@ -172,7 +172,7 @@ public class SecurityGroupManagerImpl2 extends SecurityGroupManagerImpl { if (vm != null && vm.getState() == State.Running) { if (logger.isTraceEnabled()) { - logger.trace("SecurityGroupManager v2: found vm, " + userVmId + " state=" + vm.getState()); + logger.trace("SecurityGroupManager v2: found vm {}, state={}", vm, vm.getState()); } Map> ingressRules = generateRulesForVM(userVmId, SecurityRuleType.IngressRule); Map> egressRules = generateRulesForVM(userVmId, SecurityRuleType.EgressRule); @@ -192,18 +192,17 @@ public class SecurityGroupManagerImpl2 extends SecurityGroupManagerImpl { ingressRules, egressRules, nicSecIps); cmd.setMsId(_serverId); if (logger.isDebugEnabled()) { - logger.debug("SecurityGroupManager v2: sending ruleset update for vm " + vm.getInstanceName() + ":ingress num rules=" + - cmd.getIngressRuleSet().size() + ":egress num rules=" + cmd.getEgressRuleSet().size() + " num cidrs=" + cmd.getTotalNumCidrs() + " sig=" + - cmd.getSignature()); + logger.debug("SecurityGroupManager v2: sending ruleset update for vm {} ingress num rules={} egress num rules={} num cidrs={} sig={}", + vm, cmd.getIngressRuleSet().size(), cmd.getEgressRuleSet().size(), cmd.getTotalNumCidrs(), cmd.getSignature()); } Commands cmds = new Commands(cmd); try { _agentMgr.send(agentId, cmds, _answerListener); if (logger.isTraceEnabled()) { - logger.trace("SecurityGroupManager v2: sent ruleset updates for " + vm.getInstanceName() + " curr queue size=" + _workQueue.size()); + logger.trace("SecurityGroupManager v2: sent ruleset updates for {} curr queue size={}", vm, 
_workQueue.size()); } } catch (AgentUnavailableException e) { - logger.debug("Unable to send updates for vm: " + userVmId + "(agentid=" + agentId + ")"); + logger.debug("Unable to send updates for vm: {} (agent={})", vm, hostDao.findByIdIncludingRemoved(agentId)); _workTracker.handleException(agentId); } } diff --git a/server/src/main/java/com/cloud/network/vpc/NetworkACLManagerImpl.java b/server/src/main/java/com/cloud/network/vpc/NetworkACLManagerImpl.java index b20e1af4673..5335b24e897 100644 --- a/server/src/main/java/com/cloud/network/vpc/NetworkACLManagerImpl.java +++ b/server/src/main/java/com/cloud/network/vpc/NetworkACLManagerImpl.java @@ -117,7 +117,7 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana if (!applyACLToPrivateGw(privateGateway)) { aclApplyStatus = false; - logger.debug("failed to apply network acl item on private gateway " + privateGateway.getId() + "acl id " + aclId); + logger.debug("failed to apply network acl item on private gateway {} acl {}", privateGateway::getUuid, () -> _networkACLDao.findById(aclId)); break; } } @@ -172,7 +172,7 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana //Otherwise existing rules will not be removed on the router element logger.debug("New network ACL is empty. Revoke existing rules before applying ACL"); if (!revokeACLItemsForPrivateGw(gateway)) { - throw new CloudRuntimeException("Failed to replace network ACL. Error while removing existing ACL " + "items for privatewa gateway: " + gateway.getId()); + throw new CloudRuntimeException(String.format("Failed to replace network ACL. Error while removing existing ACL items for private gateway: [id: %d, uuid: %s]", gateway.getId(), gateway.getUuid())); } } @@ -206,7 +206,7 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana logger.debug("New network ACL is empty. 
Revoke existing rules before applying ACL"); } else { if (!revokeACLItemsForNetwork(network.getId())) { - throw new CloudRuntimeException("Failed to replace network ACL. Error while removing existing ACL items for network: " + network.getId()); + throw new CloudRuntimeException(String.format("Failed to replace network ACL. Error while removing existing ACL items for network: %s", network)); } } } @@ -214,7 +214,7 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana network.setNetworkACLId(acl.getId()); //Update Network ACL if (_networkDao.update(network.getId(), network)) { - logger.debug("Updated network: " + network.getId() + " with Network ACL Id: " + acl.getId() + ", Applying ACL items"); + logger.debug("Updated network: {} with Network ACL: {}, Applying ACL items", network, acl); //Apply ACL to network final Boolean result = applyACLToNetwork(network.getId()); if (result) { @@ -293,12 +293,12 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana } final List aclItems = _networkACLItemDao.listByACL(network.getNetworkACLId()); if (aclItems.isEmpty()) { - logger.debug("Found no network ACL Items for network id=" + networkId); + logger.debug("Found no network ACL Items for network={}", network); return true; } if (logger.isDebugEnabled()) { - logger.debug("Releasing " + aclItems.size() + " Network ACL Items for network id=" + networkId); + logger.debug("Releasing {} Network ACL Items for network={}", aclItems.size(), network); } for (final NetworkACLItemVO aclItem : aclItems) { @@ -311,7 +311,7 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana final boolean success = applyACLItemsToNetwork(network.getId(), aclItems); if (logger.isDebugEnabled() && success) { - logger.debug("Successfully released Network ACLs for network id=" + networkId + " and # of rules now = " + aclItems.size()); + logger.debug("Successfully released Network ACLs for network={} and # of rules now = 
{}", network, aclItems.size()); } return success; @@ -322,12 +322,12 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana final long networkACLId = gateway.getNetworkACLId(); final List aclItems = _networkACLItemDao.listByACL(networkACLId); if (aclItems.isEmpty()) { - logger.debug("Found no network ACL Items for private gateway 'id=" + gateway.getId() + "'"); + logger.debug("Found no network ACL Items for private gateway {}", gateway); return true; } if (logger.isDebugEnabled()) { - logger.debug("Releasing " + aclItems.size() + " Network ACL Items for private gateway id=" + gateway.getId()); + logger.debug("Releasing {} Network ACL Items for private gateway {}", aclItems.size(), gateway); } for (final NetworkACLItemVO aclItem : aclItems) { @@ -340,7 +340,7 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana final boolean success = applyACLToPrivateGw(gateway, aclItems); if (logger.isDebugEnabled() && success) { - logger.debug("Successfully released Network ACLs for private gateway id=" + gateway.getId() + " and # of rules now = " + aclItems.size()); + logger.debug("Successfully released Network ACLs for private gateway={} and # of rules now = {}", gateway, aclItems.size()); } return success; @@ -437,7 +437,7 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana continue; } foundProvider = true; - logger.debug("Applying NetworkACL for network: " + network.getId() + " with Network ACL service provider"); + logger.debug("Applying NetworkACL for network: {} with Network ACL service provider", network); handled = element.applyNetworkACLs(network, rules); if (handled) { // publish message on message bus, so that network elements implementing distributed routing @@ -447,7 +447,7 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana } } if (!foundProvider) { - logger.debug("Unable to find NetworkACL service provider for network: " + network.getId()); + 
logger.debug("Unable to find NetworkACL service provider for network: {}", network); } return handled; } diff --git a/server/src/main/java/com/cloud/network/vpc/NetworkACLServiceImpl.java b/server/src/main/java/com/cloud/network/vpc/NetworkACLServiceImpl.java index 54338173282..0791ca7ecd3 100644 --- a/server/src/main/java/com/cloud/network/vpc/NetworkACLServiceImpl.java +++ b/server/src/main/java/com/cloud/network/vpc/NetworkACLServiceImpl.java @@ -264,7 +264,7 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ } _accountMgr.checkAccess(caller, null, true, vpc); if (!gateway.getVpcId().equals(acl.getVpcId())) { - throw new InvalidParameterValueException("private gateway: " + privateGatewayId + " and ACL: " + aclId + " do not belong to the same VPC"); + throw new InvalidParameterValueException(String.format("private gateway: %s and ACL: %s do not belong to the same VPC", vo, acl)); } } @@ -301,7 +301,7 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ validateAclAssociatedToVpc(acl.getVpcId(), caller, acl.getUuid()); if (!network.getVpcId().equals(acl.getVpcId())) { - throw new InvalidParameterValueException("Network: " + networkId + " and ACL: " + aclId + " do not belong to the same VPC"); + throw new InvalidParameterValueException(String.format("Network: %s and ACL: %s do not belong to the same VPC", network, acl)); } } @@ -510,7 +510,7 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ * @return the Id of the network ACL that is created. */ protected Long createAclListForNetworkAndReturnAclListId(CreateNetworkACLCmd aclItemCmd, Network network) { - logger.debug("Network " + network.getId() + " is not associated with any ACL. Creating an ACL before adding acl item"); + logger.debug("Network {} is not associated with any ACL. 
Creating an ACL before adding acl item", network); if (!networkModel.areServicesSupportedByNetworkOffering(network.getNetworkOfferingId(), Network.Service.NetworkACL)) { throw new InvalidParameterValueException("Network Offering does not support NetworkACL service"); @@ -525,18 +525,18 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ String description = "ACL for " + aclName; NetworkACL acl = _networkAclMgr.createNetworkACL(aclName, description, network.getVpcId(), aclItemCmd.isDisplay()); if (acl == null) { - throw new CloudRuntimeException("Error while create ACL before adding ACL Item for network " + network.getId()); + throw new CloudRuntimeException(String.format("Error while create ACL before adding ACL Item for network %s", network)); } - logger.debug("Created ACL: " + aclName + " for network " + network.getId()); + logger.debug("Created ACL: {} for network {}", aclName, network); Long aclId = acl.getId(); //Apply acl to network try { if (!_networkAclMgr.replaceNetworkACL(acl, (NetworkVO)network)) { - throw new CloudRuntimeException("Unable to apply auto created ACL to network " + network.getId()); + throw new CloudRuntimeException(String.format("Unable to apply auto created ACL to network %s", network)); } - logger.debug("Created ACL is applied to network " + network.getId()); + logger.debug("Created ACL is applied to network {}", network); } catch (ResourceUnavailableException e) { - throw new CloudRuntimeException("Unable to apply auto created ACL to network " + network.getId(), e); + throw new CloudRuntimeException(String.format("Unable to apply auto created ACL to network %s", network), e); } return aclId; } @@ -1063,7 +1063,7 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ */ protected void validateAclConsistency(MoveNetworkAclItemCmd moveNetworkAclItemCmd, NetworkACLVO lockedAcl, List allAclRules) { if (CollectionUtils.isEmpty(allAclRules)) { - logger.debug(String.format("No ACL 
rules for [id=%s, name=%s]. Therefore, there is no need for consistency validation.", lockedAcl.getUuid(), lockedAcl.getName())); + logger.debug("No ACL rules for {}. Therefore, there is no need for consistency validation.", lockedAcl); return; } String aclConsistencyHash = moveNetworkAclItemCmd.getAclConsistencyHash(); diff --git a/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java b/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java index 6d24c0fe700..399abbfad17 100644 --- a/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java +++ b/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java @@ -985,8 +985,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis // (the offering can be disabled though) final int vpcCount = vpcDao.getVpcCountByOfferingId(offId); if (vpcCount > 0) { - throw new InvalidParameterValueException("Can't delete vpc offering " + offId + " as its used by " + vpcCount + " vpcs. " - + "To make the network offering unavailable, disable it"); + throw new InvalidParameterValueException(String.format("Can't delete vpc offering %s as its used by %d vpcs. 
To make the network offering unavailable, disable it", offering, vpcCount)); } if (_vpcOffDao.remove(offId)) { @@ -1118,8 +1117,9 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis vpcOfferingDetailsDao.persist(detailVO); } } - logger.debug("Updated VPC offeirng id=" + vpcOffId); - return _vpcOffDao.findById(vpcOffId); + VpcOfferingVO updatedVpcOffering = _vpcOffDao.findById(vpcOffId); + logger.debug("Updated VPC offering {}", updatedVpcOffering); + return updatedVpcOffering; } @Override @@ -1360,7 +1360,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis logger.debug(String.format("Reserving a source NAT IP for NSX VPC %s", vpc.getName())); sourceNatIP = reserveSourceNatIpForNsxVpc(account, zone); } - IpAddress ip = _ipAddrMgr.allocateIp(account, false, CallContext.current().getCallingAccount(), CallContext.current().getCallingUserId(), zone, null, sourceNatIP); + IpAddress ip = _ipAddrMgr.allocateIp(account, false, CallContext.current().getCallingAccount(), CallContext.current().getCallingUser(), zone, null, sourceNatIP); this.associateIPToVpc(ip.getId(), vpc.getId()); } catch (ResourceAllocationException | ResourceUnavailableException | InsufficientAddressCapacityException e){ throw new CloudRuntimeException("new source NAT address cannot be acquired", e); @@ -1495,7 +1495,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } // cleanup vpc resources - if (!cleanupVpcResources(vpc.getId(), caller, callerUserId)) { + if (!cleanupVpcResources(vpc, caller, callerUserId)) { logger.warn("Failed to cleanup resources for vpc " + vpc); return false; } @@ -1557,7 +1557,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis boolean restartRequired = checkAndUpdateRouterSourceNatIp(vpcToUpdate, sourceNatIp); if (vpcDao.update(vpcId, vpc) || restartRequired) { // Note that the update may fail because nothing has changed, other than the 
sourcenat ip - logger.debug("Updated VPC id=" + vpcId); + logger.debug("Updated VPC {}", vpc); if (restartRequired) { if (logger.isDebugEnabled()) { logger.debug(String.format("restarting vpc %s/%s, due to changing sourcenat in Update VPC call", vpc.getName(), vpc.getUuid())); @@ -2188,19 +2188,19 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis return vpcs; } - public boolean cleanupVpcResources(final long vpcId, final Account caller, final long callerUserId) throws ResourceUnavailableException, ConcurrentOperationException { - logger.debug("Cleaning up resources for vpc id=" + vpcId); + public boolean cleanupVpcResources(final Vpc vpc, final Account caller, final long callerUserId) throws ResourceUnavailableException, ConcurrentOperationException { + logger.debug("Cleaning up resources for vpc {}", vpc); boolean success = true; // 1) Remove VPN connections and VPN gateway logger.debug("Cleaning up existed site to site VPN connections"); - _s2sVpnMgr.cleanupVpnConnectionByVpc(vpcId); + _s2sVpnMgr.cleanupVpnConnectionByVpc(vpc.getId()); logger.debug("Cleaning up existed site to site VPN gateways"); - _s2sVpnMgr.cleanupVpnGatewayByVpc(vpcId); + _s2sVpnMgr.cleanupVpnGatewayByVpc(vpc.getId()); // 2) release all ip addresses - final List ipsToRelease = _ipAddressDao.listByAssociatedVpc(vpcId, null); - logger.debug("Releasing ips for vpc id=" + vpcId + " as a part of vpc cleanup"); + final List ipsToRelease = _ipAddressDao.listByAssociatedVpc(vpc.getId(), null); + logger.debug("Releasing ips for vpc {} as a part of vpc cleanup", vpc); for (final IPAddressVO ipToRelease : ipsToRelease) { if (ipToRelease.isPortable()) { // portable IP address are associated with owner, until @@ -2211,38 +2211,38 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis _ipAddressDao.update(ipToRelease.getId(), ipToRelease); logger.debug("Portable IP address " + ipToRelease + " is no longer associated with any VPC"); } else { - 
success = success && _ipAddrMgr.disassociatePublicIpAddress(ipToRelease.getId(), callerUserId, caller); + success = success && _ipAddrMgr.disassociatePublicIpAddress(ipToRelease, callerUserId, caller); if (!success) { - logger.warn("Failed to cleanup ip " + ipToRelease + " as a part of vpc id=" + vpcId + " cleanup"); + logger.warn("Failed to cleanup ip {} as a part of vpc {} cleanup", ipToRelease, vpc); } } } if (success) { - logger.debug("Released ip addresses for vpc id=" + vpcId + " as a part of cleanup vpc process"); + logger.debug("Released ip addresses for vpc {} as a part of cleanup vpc process", vpc); } else { - logger.warn("Failed to release ip addresses for vpc id=" + vpcId + " as a part of cleanup vpc process"); + logger.warn("Failed to release ip addresses for vpc {} as a part of cleanup vpc process", vpc); // although it failed, proceed to the next cleanup step as it // doesn't depend on the public ip release } // 3) Delete all static route rules - if (!revokeStaticRoutesForVpc(vpcId, caller)) { - logger.warn("Failed to revoke static routes for vpc " + vpcId + " as a part of cleanup vpc process"); + if (!revokeStaticRoutesForVpc(vpc, caller)) { + logger.warn("Failed to revoke static routes for vpc {} as a part of cleanup vpc process", vpc); return false; } // 4) Delete private gateways - final List gateways = getVpcPrivateGateways(vpcId); + final List gateways = getVpcPrivateGateways(vpc.getId()); if (gateways != null) { for (final PrivateGateway gateway : gateways) { if (gateway != null) { - logger.debug("Deleting private gateway " + gateway + " as a part of vpc " + vpcId + " resources cleanup"); + logger.debug("Deleting private gateway {} as a part of vpc {} resources cleanup", gateway, vpc); if (!deleteVpcPrivateGateway(gateway.getId())) { success = false; - logger.debug("Failed to delete private gateway " + gateway + " as a part of vpc " + vpcId + " resources cleanup"); + logger.debug("Failed to delete private gateway {} as a part of vpc {} 
resources cleanup", gateway, vpc); } else { - logger.debug("Deleted private gateway " + gateway + " as a part of vpc " + vpcId + " resources cleanup"); + logger.debug("Deleted private gateway {} as a part of vpc {} resources cleanup", gateway, vpc); } } } @@ -2253,7 +2253,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis searchBuilder.and("vpcId", searchBuilder.entity().getVpcId(), Op.IN); final SearchCriteria searchCriteria = searchBuilder.create(); - searchCriteria.setParameters("vpcId", vpcId); + searchCriteria.setParameters("vpcId", vpc.getId()); final Filter filter = new Filter(NetworkACLVO.class, "id", false, null, null); final Pair, Integer> aclsCountPair = _networkAclDao.searchAndCount(searchCriteria, filter); @@ -2263,15 +2263,14 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis _networkAclMgr.deleteNetworkACL(networkAcl); } - routedIpv4Manager.releaseBgpPeersForVpc(vpcId); - routedIpv4Manager.releaseIpv4SubnetForVpc(vpcId); + routedIpv4Manager.releaseBgpPeersForVpc(vpc.getId()); + routedIpv4Manager.releaseIpv4SubnetForVpc(vpc.getId()); - VpcVO vpc = vpcDao.findById(vpcId); annotationDao.removeByEntityType(AnnotationService.EntityType.VPC.name(), vpc.getUuid()); ASNumberVO asNumber = asNumberDao.findByZoneAndVpcId(vpc.getZoneId(), vpc.getId()); if (asNumber != null) { - logger.debug(String.format("Releasing AS number %s from VPC %s", asNumber.getAsNumber(), vpc.getName())); + logger.debug("Releasing AS number {} from VPC {}", asNumber.getAsNumber(), vpc); bgpService.releaseASNumber(vpc.getZoneId(), asNumber.getAsNumber(), true); } @@ -2460,7 +2459,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis // try to create it } if (privateNtwk == null) { - logger.info("creating new network for vpc " + vpc + " using broadcast uri: " + broadcastUri + " and associated network id: " + associatedNetworkId); + logger.info("creating new network for vpc {} using 
broadcast uri: {} and associated network: {}", vpc, broadcastUri, _ntwkDao.findById(associatedNetworkId)); final String networkName = "vpc-" + vpc.getName() + "-privateNetwork"; privateNtwk = _ntwkSvc.createPrivateNetwork(networkName, networkName, physicalNetworkIdFinal, broadcastUri, ipAddress, null, gateway, netmask, gatewayOwnerId, vpcId, isSourceNat, networkOfferingId, bypassVlanOverlapCheck, associatedNetworkId); @@ -2480,7 +2479,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis final Long nextMac = mac + 1; dc.setMacAddress(nextMac); - logger.info("creating private ip address for vpc (" + ipAddress + ", " + privateNtwk.getId() + ", " + nextMac + ", " + vpcId + ", " + isSourceNat + ")"); + logger.info("creating private ip address for vpc ({}, {}, {}, {}, {})", ipAddress, privateNtwk, nextMac, vpcId, isSourceNat); privateIp = new PrivateIpVO(ipAddress, privateNtwk.getId(), nextMac, vpcId, isSourceNat); _privateIpDao.persist(privateIp); @@ -2671,7 +2670,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis final VpcGatewayVO gatewayVO = _vpcGatewayDao.acquireInLockTable(gatewayId); if (gatewayVO == null || gatewayVO.getType() != VpcGateway.Type.Private) { - throw new ConcurrentOperationException("Unable to lock gateway " + gatewayId); + throw new ConcurrentOperationException(String.format("Unable to lock gateway %s", gatewayToBeDeleted)); } final Account caller = CallContext.current().getCallingAccount(); @@ -2745,6 +2744,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis protected boolean deletePrivateGatewayFromTheDB(final PrivateGateway gateway) { // check if there are ips allocted in the network final long networkId = gateway.getNetworkId(); + NetworkVO network = _ntwkDao.findById(networkId); vpcTxCallable.setGateway(gateway); @@ -2759,12 +2759,10 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis final Account owner = 
_accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM); final ReservationContext context = new ReservationContextImpl(null, null, callerUser, owner); _ntwkMgr.destroyNetwork(networkId, context, false); - logger.debug("Deleted private network id=" + networkId); + logger.debug("Deleted private network {}", network); } - } catch (final InterruptedException e) { - logger.error("deletePrivateGatewayFromTheDB failed to delete network id " + networkId + "due to => ", e); - } catch (final ExecutionException e) { - logger.error("deletePrivateGatewayFromTheDB failed to delete network id " + networkId + "due to => ", e); + } catch (final InterruptedException | ExecutionException e) { + logger.error("deletePrivateGatewayFromTheDB failed to delete network {} due to => ", network, e); } return true; @@ -2922,10 +2920,10 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } @DB - protected boolean revokeStaticRoutesForVpc(final long vpcId, final Account caller) throws ResourceUnavailableException { + protected boolean revokeStaticRoutesForVpc(final Vpc vpc, final Account caller) throws ResourceUnavailableException { // get all static routes for the vpc - final List routes = _staticRouteDao.listByVpcId(vpcId); - logger.debug("Found " + routes.size() + " to revoke for the vpc " + vpcId); + final List routes = _staticRouteDao.listByVpcId(vpc.getId()); + logger.debug("Found {} to revoke for the vpc {}", routes.size(), vpc); if (!routes.isEmpty()) { // mark all of them as revoke Transaction.execute(new TransactionCallbackNoReturn() { @@ -2936,7 +2934,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } } }); - return applyStaticRoutesForVpc(vpcId); + return applyStaticRoutesForVpc(vpc.getId()); } return true; @@ -3223,7 +3221,13 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis @Override public void unassignIPFromVpcNetwork(final long ipId, final long networkId) { - final IPAddressVO ip = 
_ipAddressDao.findById(ipId); + IPAddressVO ip = _ipAddressDao.findById(ipId); + Network network = _ntwkModel.getNetwork(networkId); + unassignIPFromVpcNetwork(ip, network); + } + + @Override + public void unassignIPFromVpcNetwork(final IPAddressVO ip, final Network network) { if (isIpAllocatedToVpc(ip)) { return; } @@ -3232,23 +3236,23 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis return; } - logger.debug("Releasing VPC ip address " + ip + " from vpc network id=" + networkId); + logger.debug("Releasing VPC ip address {} from vpc network {}", ip, network); final long vpcId = ip.getVpcId(); boolean success = false; try { // unassign ip from the VPC router - success = _ipAddrMgr.applyIpAssociations(_ntwkModel.getNetwork(networkId), true); + success = _ipAddrMgr.applyIpAssociations(network, true); } catch (final ResourceUnavailableException ex) { - throw new CloudRuntimeException("Failed to apply ip associations for network id=" + networkId + " as a part of unassigning ip " + ipId + " from vpc", ex); + throw new CloudRuntimeException("Failed to apply ip associations for network id=" + network + " as a part of unassigning ip " + ip + " from vpc", ex); } if (success) { ip.setAssociatedWithNetworkId(null); - _ipAddressDao.update(ipId, ip); - logger.debug("IP address " + ip + " is no longer associated with the network inside vpc id=" + vpcId); + _ipAddressDao.update(ip.getId(), ip); + logger.debug("IP address {} is no longer associated with the network inside vpc {}", ip, vpcDao.findById(vpcId)); } else { - throw new CloudRuntimeException("Failed to apply ip associations for network id=" + networkId + " as a part of unassigning ip " + ipId + " from vpc"); + throw new CloudRuntimeException(String.format("Failed to apply ip associations for network %s as a part of unassigning ip %s from vpc", network, ip)); } logger.debug("Successfully released VPC ip address " + ip + " back to VPC pool "); } @@ -3453,7 +3457,7 @@ public class 
VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis // Re-program VPC VR or add a new backup router for redundant VPC if (!startVpc(vpc, dest, context)) { - logger.debug("Failed to re-program VPC router or deploy a new backup router for VPC" + vpc); + logger.debug("Failed to re-program VPC router or deploy a new backup router for VPC{}", vpc); return false; } diff --git a/server/src/main/java/com/cloud/network/vpc/VpcPrivateGatewayTransactionCallable.java b/server/src/main/java/com/cloud/network/vpc/VpcPrivateGatewayTransactionCallable.java index 072b17ab9b9..45a62973947 100644 --- a/server/src/main/java/com/cloud/network/vpc/VpcPrivateGatewayTransactionCallable.java +++ b/server/src/main/java/com/cloud/network/vpc/VpcPrivateGatewayTransactionCallable.java @@ -21,6 +21,7 @@ import java.util.concurrent.Callable; import javax.inject.Inject; +import com.cloud.network.dao.NetworkDao; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; @@ -40,6 +41,8 @@ public class VpcPrivateGatewayTransactionCallable implements Callable { private VpcGatewayDao _vpcGatewayDao; @Inject private PrivateIpDao _privateIpDao; + @Inject + private NetworkDao networkDao; private PrivateGateway gateway; private boolean deleteNetwork = true; @@ -54,7 +57,7 @@ public class VpcPrivateGatewayTransactionCallable implements Callable { final List privateIps = _privateIpDao.listByNetworkId(networkId); if (privateIps.size() > 1 || !privateIps.get(0).getIpAddress().equalsIgnoreCase(gateway.getIp4Address())) { - logger.debug("Not removing network id=" + gateway.getNetworkId() + " as it has private ip addresses for other gateways"); + logger.debug("Not removing network {} as it has private ip addresses for other gateways", networkDao.findById(gateway.getNetworkId())); deleteNetwork = false; } diff --git a/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java 
b/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java index 6cef834b0f7..29c0106dc18 100644 --- a/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java +++ b/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java @@ -329,8 +329,7 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc }catch (ResourceUnavailableException ex) { vpn.setState(prevState); _remoteAccessVpnDao.update(vpn.getId(), vpn); - logger.debug("Failed to stop the vpn " + vpn.getId() + " , so reverted state to "+ - RemoteAccessVpn.State.Running); + logger.debug("Failed to stop the vpn {}, so reverted state to {}", vpn, RemoteAccessVpn.State.Running); success = false; } finally { if (success|| forceCleanup) { @@ -435,10 +434,10 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc @DB @Override - public boolean removeVpnUser(long vpnOwnerId, String username, Account caller) { - final VpnUserVO user = _vpnUsersDao.findByAccountAndUsername(vpnOwnerId, username); + public boolean removeVpnUser(Account vpnOwner, String username, Account caller) { + final VpnUserVO user = _vpnUsersDao.findByAccountAndUsername(vpnOwner.getId(), username); if (user == null) { - String errorMessage = String.format("Could not find VPN user=[%s]. VPN owner id=[%s]", username, vpnOwnerId); + String errorMessage = String.format("Could not find VPN user=[%s]. 
VPN owner=[%s]", username, vpnOwner); logger.debug(errorMessage); throw new InvalidParameterValueException(errorMessage); } @@ -521,14 +520,14 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc } @DB - private boolean removeVpnUserWithoutRemoteAccessVpn(long vpnOwnerId, String userName) { - VpnUserVO vpnUser = _vpnUsersDao.findByAccountAndUsername(vpnOwnerId, userName); + private boolean removeVpnUserWithoutRemoteAccessVpn(Account vpnOwner, String userName) { + VpnUserVO vpnUser = _vpnUsersDao.findByAccountAndUsername(vpnOwner.getId(), userName); if (vpnUser == null) { - logger.error(String.format("VPN user not found with ownerId: %d and username: %s", vpnOwnerId, userName)); + logger.error("VPN user not found with owner: {} and username: {}", vpnOwner, userName); return false; } if (!State.Revoke.equals(vpnUser.getState())) { - logger.error(String.format("VPN user with ownerId: %d and username: %s is not in revoked state, current state: %s", vpnOwnerId, userName, vpnUser.getState())); + logger.error("VPN user with owner: {} and username: {} is not in revoked state, current state: {}", vpnOwner, userName, vpnUser.getState()); return false; } return _vpnUsersDao.remove(vpnUser.getId()); @@ -546,7 +545,7 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc if (CollectionUtils.isEmpty(vpns)) { if (forRemove) { - return removeVpnUserWithoutRemoteAccessVpn(vpnOwnerId, userName); + return removeVpnUserWithoutRemoteAccessVpn(owner, userName); } logger.warn(String.format("Unable to apply VPN user due to there are no remote access VPNs configured on %s to apply VPN user.", owner.toString())); return true; @@ -578,7 +577,7 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc if (indexUser == users.size()) { indexUser = 0; } - logger.debug("VPN User " + users.get(indexUser) + (result == null ? 
" is set on " : (" couldn't be set due to " + result) + " on ") + vpn.getUuid()); + logger.debug("VPN User {}{}{}", users.get(indexUser), result == null ? " is set on " : (" couldn't be set due to " + result) + " on ", vpn); if (result == null) { if (finals[indexUser] == null) { finals[indexUser] = true; diff --git a/server/src/main/java/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java b/server/src/main/java/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java index 094f81607fe..ed83e396abf 100644 --- a/server/src/main/java/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java +++ b/server/src/main/java/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java @@ -134,12 +134,12 @@ public class Site2SiteVpnManagerImpl extends ManagerBase implements Site2SiteVpn } Site2SiteVpnGatewayVO gws = _vpnGatewayDao.findByVpcId(vpcId); if (gws != null) { - throw new InvalidParameterValueException("The VPN gateway of VPC " + vpcId + " already existed!"); + throw new InvalidParameterValueException(String.format("The VPN gateway of VPC %s already existed!", vpc)); } //Use source NAT ip for VPC List ips = _ipAddressDao.listByAssociatedVpc(vpcId, true); if (ips.size() != 1) { - throw new CloudRuntimeException("Cannot found source nat ip of vpc " + vpcId); + throw new CloudRuntimeException(String.format("Cannot found source nat ip of vpc %s", vpc)); } Site2SiteVpnGatewayVO gw = new Site2SiteVpnGatewayVO(owner.getAccountId(), owner.getDomainId(), ips.get(0).getId(), vpcId); @@ -266,7 +266,7 @@ public class Site2SiteVpnManagerImpl extends ManagerBase implements Site2SiteVpn Site2SiteVpnGateway vpnGateway = getAndValidateSite2SiteVpnGateway(vpnGatewayId, caller); validateVpnConnectionOfTheRightAccount(customerGateway, vpnGateway); - validateVpnConnectionDoesntExist(vpnGatewayId, customerGatewayId); + validateVpnConnectionDoesntExist(customerGateway, vpnGateway); validatePrerequisiteVpnGateway(vpnGateway); String[] cidrList = customerGateway.getGuestCidrList().split(","); @@ -275,8 +275,7 @@ 
public class Site2SiteVpnManagerImpl extends ManagerBase implements Site2SiteVpn String vpcCidr = _vpcDao.findById(vpnGateway.getVpcId()).getCidr(); for (String cidr : cidrList) { if (NetUtils.isNetworksOverlap(vpcCidr, cidr)) { - throw new InvalidParameterValueException("The subnets of customer gateway " + customerGatewayId + "'s subnet " + cidr + " is overlapped with VPC cidr " + - vpcCidr + "!"); + throw new InvalidParameterValueException(String.format("The subnets of customer gateway %s subnet %s is overlapped with VPC cidr %s!", customerGateway, cidr, vpcCidr)); } } @@ -335,10 +334,9 @@ public class Site2SiteVpnManagerImpl extends ManagerBase implements Site2SiteVpn } } - private void validateVpnConnectionDoesntExist(Long vpnGatewayId, Long customerGatewayId) { - if (_vpnConnectionDao.findByVpnGatewayIdAndCustomerGatewayId(vpnGatewayId, customerGatewayId) != null) { - throw new InvalidParameterValueException("The vpn connection with customer gateway id " + customerGatewayId + " and vpn gateway id " + vpnGatewayId + - " already existed!"); + private void validateVpnConnectionDoesntExist(Site2SiteCustomerGateway customerGateway, Site2SiteVpnGateway vpnGateway) { + if (_vpnConnectionDao.findByVpnGatewayIdAndCustomerGatewayId(vpnGateway.getId(), customerGateway.getId()) != null) { + throw new InvalidParameterValueException(String.format("The vpn connection with customer gateway %s and vpn gateway %s already existed!", customerGateway, vpnGateway)); } } @@ -414,7 +412,7 @@ public class Site2SiteVpnManagerImpl extends ManagerBase implements Site2SiteVpn long id = gw.getId(); List vpnConnections = _vpnConnectionDao.listByCustomerGatewayId(id); if (!CollectionUtils.isEmpty(vpnConnections)) { - throw new InvalidParameterValueException("Unable to delete VPN customer gateway with id " + id + " because there is still related VPN connections!"); + throw new InvalidParameterValueException(String.format("Unable to delete VPN customer gateway %s because there is still related 
VPN connections!", gw)); } annotationDao.removeByEntityType(AnnotationService.EntityType.VPN_CUSTOMER_GATEWAY.name(), gw.getUuid()); _customerGatewayDao.remove(id); @@ -424,7 +422,7 @@ public class Site2SiteVpnManagerImpl extends ManagerBase implements Site2SiteVpn protected void doDeleteVpnGateway(Site2SiteVpnGateway gw) { List conns = _vpnConnectionDao.listByVpnGatewayId(gw.getId()); if (!CollectionUtils.isEmpty(conns)) { - throw new InvalidParameterValueException("Unable to delete VPN gateway " + gw.getId() + " because there is still related VPN connections!"); + throw new InvalidParameterValueException(String.format("Unable to delete VPN gateway %s because there is still related VPN connections!", gw)); } _vpnGatewayDao.remove(gw.getId()); } @@ -546,7 +544,7 @@ public class Site2SiteVpnManagerImpl extends ManagerBase implements Site2SiteVpn } catch (PermissionDeniedException e) { // Just don't restart this connection, as the user has no rights to it // Maybe should issue a notification to the system? 
- logger.info("Site2SiteVpnManager:updateCustomerGateway() Not resetting VPN connection " + conn.getId() + " as user lacks permission"); + logger.info("Site2SiteVpnManager:updateCustomerGateway() Not resetting VPN connection {} as user lacks permission", conn); continue; } @@ -814,7 +812,7 @@ public class Site2SiteVpnManagerImpl extends ManagerBase implements Site2SiteVpn } Site2SiteVpnConnectionVO lock = _vpnConnectionDao.acquireInLockTable(conn.getId()); if (lock == null) { - throw new CloudRuntimeException("Unable to acquire lock on " + conn); + throw new CloudRuntimeException(String.format("Unable to acquire lock on vpn connection %s", conn)); } try { if (conn.getState() == Site2SiteVpnConnection.State.Connected || conn.getState() == Site2SiteVpnConnection.State.Connecting) { @@ -861,7 +859,7 @@ public class Site2SiteVpnManagerImpl extends ManagerBase implements Site2SiteVpn startVpnConnection(conn.getId()); } catch (ResourceUnavailableException e) { Site2SiteCustomerGatewayVO gw = _customerGatewayDao.findById(conn.getCustomerGatewayId()); - logger.warn("Site2SiteVpnManager: Fail to re-initiate VPN connection " + conn.getId() + " which connect to " + gw.getName()); + logger.warn("Site2SiteVpnManager: Fail to re-initiate VPN connection {} which connect to {}", conn, gw); } } } diff --git a/server/src/main/java/com/cloud/projects/ProjectManager.java b/server/src/main/java/com/cloud/projects/ProjectManager.java index 123284955fa..5f58205208b 100644 --- a/server/src/main/java/com/cloud/projects/ProjectManager.java +++ b/server/src/main/java/com/cloud/projects/ProjectManager.java @@ -35,7 +35,7 @@ public interface ProjectManager extends ProjectService { boolean canModifyProjectAccount(Account caller, long accountId); - boolean deleteAccountFromProject(long projectId, long accountId); + boolean deleteAccountFromProject(long projectId, Account account); List listPermittedProjectAccounts(long accountId); diff --git 
a/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java b/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java index 16e3925330d..7a743e3ce76 100644 --- a/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java +++ b/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java @@ -379,7 +379,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C boolean updateResult = Transaction.execute(new TransactionCallback() { @Override public Boolean doInTransaction(TransactionStatus status) { - logger.debug("Marking project id=" + project.getId() + " with state " + State.Disabled + " as a part of project delete..."); + logger.debug("Marking project {} with state {} as a part of project delete...", project, State.Disabled); project.setState(State.Disabled); boolean updateResult = _projectDao.update(project.getId(), project); //owner can be already removed at this point, so adding the conditional check @@ -395,7 +395,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C if (updateResult) { //pass system caller when clenaup projects account if (!cleanupProject(project, _accountDao.findById(Account.ACCOUNT_ID_SYSTEM), User.UID_SYSTEM)) { - logger.warn("Failed to cleanup project's id=" + project.getId() + " resources, not removing the project yet"); + logger.warn("Failed to cleanup project's ({}) resources, not removing the project yet", project); return false; } else { //check if any Tungsten-Fabric provider exists and delete the project from Tungsten-Fabric providers @@ -403,7 +403,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C return _projectDao.remove(project.getId()); } } else { - logger.warn("Failed to mark the project id=" + project.getId() + " with state " + State.Disabled); + logger.warn("Failed to mark the project {} with state {}", project, State.Disabled); return false; } } @@ -413,7 +413,7 @@ public class ProjectManagerImpl extends 
ManagerBase implements ProjectManager, C boolean result = true; //Delete project's account AccountVO account = _accountDao.findById(project.getProjectAccountId()); - logger.debug("Deleting projects " + project + " internal account id=" + account.getId() + " as a part of project cleanup..."); + logger.debug("Deleting projects {} internal account {} as a part of project cleanup...", project, account); result = result && _accountMgr.deleteAccount(account, callerUserId, caller); @@ -482,20 +482,20 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C @Override @DB - public boolean deleteAccountFromProject(final long projectId, final long accountId) { + public boolean deleteAccountFromProject(final long projectId, final Account account) { return Transaction.execute(new TransactionCallback() { @Override public Boolean doInTransaction(TransactionStatus status) { boolean success = true; //remove account - ProjectAccountVO projectAccount = _projectAccountDao.findByProjectIdAccountId(projectId, accountId); + ProjectAccountVO projectAccount = _projectAccountDao.findByProjectIdAccountId(projectId, account.getId()); success = _projectAccountDao.remove(projectAccount.getId()); //remove all invitations for account if (success) { - logger.debug("Removed account " + accountId + " from project " + projectId + " , cleaning up old invitations for account/project..."); - ProjectInvitation invite = _projectInvitationDao.findByAccountIdProjectId(accountId, projectId); + logger.debug("Removed account {} from project {} , cleaning up old invitations for account/project...", account, projectId); + ProjectInvitation invite = _projectInvitationDao.findByAccountIdProjectId(account.getId(), projectId); if (invite != null) { success = success && _projectInvitationDao.remove(invite.getId()); } @@ -572,7 +572,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C ProjectAccount projectAccountUser = 
_projectAccountDao.findByProjectIdUserId(projectId, user.getAccountId(), user.getId()); if (projectAccountUser != null) { - logger.info("User with id: " + user.getId() + " is already added to the project with id: " + projectId); + logger.info("User: {} is already added to the project: {}", user, project); return true; } @@ -598,7 +598,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C Optional.ofNullable(role).map(ProjectRole::getId).orElse(null)) != null) { return true; } - logger.warn("Failed to add user to project with id: " + projectId); + logger.warn("Failed to add user to project: {}", project); return false; } } @@ -691,8 +691,8 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C } Account currentOwnerAccount = getProjectOwner(projectId); if (currentOwnerAccount == null) { - logger.error("Unable to find the current owner for the project id=" + projectId); - throw new InvalidParameterValueException("Unable to find the current owner for the project id=" + projectId); + logger.error("Unable to find the current owner for the project {}", project); + throw new InvalidParameterValueException(String.format("Unable to find the current owner for the project %s", project)); } if (currentOwnerAccount.getId() != futureOwnerAccount.getId()) { ProjectAccountVO futureOwner = _projectAccountDao.findByProjectIdAccountId(projectId, futureOwnerAccount.getAccountId()); @@ -716,7 +716,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C _resourceLimitMgr.incrementResourceCount(futureOwnerAccount.getId(), ResourceType.project); } else { - logger.trace("Future owner " + newOwnerName + "is already the owner of the project id=" + projectId); + logger.trace("Future owner {} is already the owner of the project {}", newOwnerName, project); } } } @@ -774,8 +774,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C } ProjectAccountVO newProjectUser = 
_projectAccountDao.findByProjectIdUserId(projectId, user.getAccountId(), userId); if (newProjectUser == null) { - throw new InvalidParameterValueException("User " + userId + - " doesn't belong to the project. Add it to the project first and then change the project's ownership"); + throw new InvalidParameterValueException(String.format("User %s doesn't belong to the project. Add it to the project first and then change the project's ownership", user)); } if (projectOwners.size() == 1 && newProjectUser.getUserId().equals(projectOwners.get(0).getUserId()) @@ -835,7 +834,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C //Check if the account already added to the project ProjectAccount projectAccount = _projectAccountDao.findByProjectIdAccountId(projectId, account.getId()); if (projectAccount != null) { - logger.debug("Account " + accountName + " already added to the project id=" + projectId); + logger.debug("Account {} already added to the project {}", accountName, project); return true; } } @@ -862,7 +861,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C Optional.ofNullable(projectRole).map(ProjectRole::getId).orElse(null)) != null) { return true; } else { - logger.warn("Failed to add account " + accountName + " to project id=" + projectId); + logger.warn("Failed to add account {} to project {}", accountName, project); return false; } } @@ -874,7 +873,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C Optional.ofNullable(projectRole).map(ProjectRole::getId).orElse(null)) != null) { return true; } else { - logger.warn("Failed to generate invitation for account " + account.getAccountName() + " to project id=" + project); + logger.warn("Failed to generate invitation for account {} to project {}", account, project); return false; } } @@ -886,7 +885,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C 
Optional.ofNullable(projectRole).map(ProjectRole::getId).orElse(null)) != null) { return true; } else { - logger.warn("Failed to generate invitation for email " + email + " to project id=" + project); + logger.warn("Failed to generate invitation for email {} to project {}", email, project); return false; } } @@ -900,7 +899,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C Optional.ofNullable(projectRole).map(ProjectRole::getId).orElse(null)) != null) { return true; } else { - logger.warn("Failed to generate invitation for account " + user.getUsername() + " to project id=" + project); + logger.warn("Failed to generate invitation for account {} to project {}", user, project); return false; } } else { @@ -910,7 +909,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C Optional.ofNullable(projectRole).map(ProjectRole::getId).orElse(null)) != null) { return true; } else { - logger.warn("Failed to generate invitation for email " + email + " to project id=" + project); + logger.warn("Failed to generate invitation for email {} to project {}", email, project); return false; } } @@ -975,7 +974,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C throw ex; } - return deleteAccountFromProject(projectId, account.getId()); + return deleteAccountFromProject(projectId, account); } @Override @@ -1028,9 +1027,9 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C if (invite != null) { boolean success = _projectInvitationDao.remove(invite.getId()); if (success){ - logger.info("Successfully deleted invite pending for the user : "+user.getUsername()); + logger.info("Successfully deleted invite pending for the user : {}", user); } else { - logger.info("Failed to delete project invite for user: "+ user.getUsername()); + logger.info("Failed to delete project invite for user: {}", user); } } } @@ -1045,7 +1044,7 @@ public class ProjectManagerImpl extends 
ManagerBase implements ProjectManager, C success = _projectAccountDao.remove(projectAccount.getId()); if (success) { - logger.debug("Removed user " + user.getId() + " from project. Removing any invite sent to the user"); + logger.debug("Removed user {} from project. Removing any invite sent to the user", user); ProjectInvitation invite = _projectInvitationDao.findByUserIdProjectId(user.getId(), user.getAccountId(), projectId); if (invite != null) { success = success && _projectInvitationDao.remove(invite.getId()); @@ -1118,7 +1117,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C public ProjectInvitation generateTokenBasedInvitation(Project project, Long userId, String email, String token, Role role, Long projectRoleId) { //verify if the invitation was already generated if (activeInviteExists(project, null, null, email)) { - throw new InvalidParameterValueException("There is already a pending invitation for email " + email + " to the project id=" + project); + throw new InvalidParameterValueException(String.format("There is already a pending invitation for email %s to the project %s", email, project)); } ProjectInvitationVO projectInvitationVO = new ProjectInvitationVO(project.getId(), null, project.getDomainId(), email, token); @@ -1136,7 +1135,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C try { sendInvite(token, email, project.getId()); } catch (Exception ex) { - logger.warn("Failed to send project id=" + project + " invitation to the email " + email + "; removing the invitation record from the db", ex); + logger.warn("Failed to send project {} invitation to the email {}; removing the invitation record from the db", project, email, ex); _projectInvitationDao.remove(projectInvitation.getId()); return null; } @@ -1166,7 +1165,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C } private boolean expireInvitation(ProjectInvitationVO invite) { - 
logger.debug("Expiring invitation id=" + invite.getId()); + logger.debug("Expiring invitation {}", invite); invite.setState(ProjectInvitation.State.Expired); return _projectInvitationDao.update(invite.getId(), invite); } @@ -1227,7 +1226,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C if (invite != null) { if (!_projectInvitationDao.isActive(invite.getId(), _invitationTimeOut) && accept) { expireInvitation(invite); - throw new InvalidParameterValueException("Invitation is expired for account id=" + accountName + " to the project id=" + projectId); + throw new InvalidParameterValueException(String.format("Invitation is expired for account id=%s to the project %s", accountName, project)); } else { final ProjectInvitationVO inviteFinal = invite; final Long accountIdFinal = invite.getAccountId() != -1 ? invite.getAccountId() : accountId; @@ -1250,14 +1249,14 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C if (inviteFinal.getForUserId() == -1) { ProjectAccount projectAccount = _projectAccountDao.findByProjectIdAccountId(projectId, accountIdFinal); if (projectAccount != null) { - logger.debug("Account " + accountNameFinal + " already added to the project id=" + projectId); + logger.debug("Account {} already added to the project {}", accountNameFinal, project); } else { assignAccountToProject(project, accountIdFinal, inviteFinal.getAccountRole(), null, inviteFinal.getProjectRoleId()); } } else { ProjectAccount projectAccount = _projectAccountDao.findByProjectIdUserId(projectId, finalUser.getAccountId(), finalUser.getId()); if (projectAccount != null) { - logger.debug("User " + finalUser.getId() + "has already been added to the project id=" + projectId); + logger.debug("User {} has already been added to the project {}", finalUser, project); } else { assignUserToProject(project, inviteFinal.getForUserId(), finalUser.getAccountId(), inviteFinal.getAccountRole(), inviteFinal.getProjectRoleId()); } @@ 
-1270,7 +1269,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C }); } } else { - throw new InvalidParameterValueException("Unable to find invitation for account name=" + accountName + " to the project id=" + projectId); + throw new InvalidParameterValueException(String.format("Unable to find invitation for account name=%s to the project=%s", accountName, project)); } return result; @@ -1312,7 +1311,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C Project.State currentState = project.getState(); if (currentState == State.Active) { - logger.debug("The project id=" + projectId + " is already active, no need to activate it again"); + logger.debug("The project {} is already active, no need to activate it again", project); return project; } @@ -1350,7 +1349,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C _accountMgr.checkAccess(caller, AccessType.ModifyProject, true, _accountMgr.getAccount(project.getProjectAccountId())); if (suspendProject(project)) { - logger.debug("Successfully suspended project id=" + projectId); + logger.debug("Successfully suspended project {}", project); return _projectDao.findById(projectId); } else { CloudRuntimeException ex = new CloudRuntimeException("Failed to suspend project with specified id"); @@ -1406,10 +1405,10 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C _accountMgr.checkAccess(caller, AccessType.ModifyProject, true, _accountMgr.getAccount(project.getProjectAccountId())); if (_projectInvitationDao.remove(id)) { - logger.debug("Project Invitation id=" + id + " is removed"); + logger.debug("Project Invitation {} is removed", invitation); return true; } else { - logger.debug("Failed to remove project invitation id=" + id); + logger.debug("Failed to remove project invitation {}", invitation); return false; } } @@ -1425,7 +1424,7 @@ public class ProjectManagerImpl extends ManagerBase 
implements ProjectManager, C for (ProjectInvitationVO invitationToExpire : invitationsToExpire) { invitationToExpire.setState(ProjectInvitation.State.Expired); _projectInvitationDao.update(invitationToExpire.getId(), invitationToExpire); - logger.trace("Expired project invitation id=" + invitationToExpire.getId()); + logger.trace("Expired project invitation {}", invitationToExpire); } } } catch (Exception ex) { diff --git a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java index 94dc9169823..4b26c7d3f38 100755 --- a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java @@ -452,9 +452,6 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } final HostPodVO pod = _podDao.findById(podId); - if (pod == null) { - throw new InvalidParameterValueException("Can't find pod with specified podId " + podId); - } // Check if the pod exists in the system if (_podDao.findById(podId) == null) { @@ -462,7 +459,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } // check if pod belongs to the zone if (!Long.valueOf(pod.getDataCenterId()).equals(dcId)) { - final InvalidParameterValueException ex = new InvalidParameterValueException("Pod with specified id doesn't belong to the zone " + dcId); + final InvalidParameterValueException ex = new InvalidParameterValueException(String.format("Pod with specified id doesn't belong to the zone %s", zone)); ex.addProxyObject(pod.getUuid(), "podId"); ex.addProxyObject(zone.getUuid(), "dcId"); throw ex; @@ -688,15 +685,16 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } // Check if the pod exists in the system + HostPodVO pod = null; if (podId != null) { - final HostPodVO pod = _podDao.findById(podId); + pod = _podDao.findById(podId); if (pod == null) { throw new 
InvalidParameterValueException("Can't find pod by id " + podId); } // check if pod belongs to the zone if (!Long.valueOf(pod.getDataCenterId()).equals(dcId)) { final InvalidParameterValueException ex = - new InvalidParameterValueException("Pod with specified podId" + podId + " doesn't belong to the zone with specified zoneId" + dcId); + new InvalidParameterValueException(String.format("Pod with specified pod %s doesn't belong to the zone with specified zone %s", pod, zone)); ex.addProxyObject(pod.getUuid(), "podId"); ex.addProxyObject(zone.getUuid(), "dcId"); throw ex; @@ -728,8 +726,10 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } } + ClusterVO cluster = null; if (clusterId != null) { - if (_clusterDao.findById(clusterId) == null) { + cluster = _clusterDao.findById(clusterId); + if (cluster == null) { throw new InvalidParameterValueException("Can't find cluster by id " + clusterId); } @@ -762,11 +762,10 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } if (clusterName != null) { - final HostPodVO pod = _podDao.findById(podId); if (pod == null) { throw new InvalidParameterValueException("Can't find pod by id " + podId); } - ClusterVO cluster = new ClusterVO(dcId, podId, clusterName); + cluster = new ClusterVO(dcId, podId, clusterName); cluster.setHypervisorType(hypervisorType); try { cluster = _clusterDao.persist(cluster); @@ -811,7 +810,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } final List hosts = new ArrayList(); - logger.info("Trying to add a new host at " + url + " in data center " + dcId); + logger.info("Trying to add a new host at {} in data center {}", url, zone); boolean isHypervisorTypeSupported = false; for (final Discoverer discoverer : _discoverers) { if (params != null) { @@ -829,7 +828,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, resources = discoverer.find(dcId, podId, clusterId, uri, 
username, password, hostTags); } catch (final DiscoveryException e) { String errorMsg = String.format("Could not add host at [%s] with zone [%s], pod [%s] and cluster [%s] due to: [%s].", - uri, dcId, podId, clusterId, e.getMessage()); + uri, zone, pod, cluster, e.getMessage()); if (logger.isDebugEnabled()) { logger.debug(errorMsg, e); } @@ -926,8 +925,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, isForceDeleteStorage); if (answer == null) { - throw new CloudRuntimeException("No resource adapter respond to DELETE_HOST event for " + host.getName() + " id = " + hostId + ", hypervisorType is " + - host.getHypervisorType() + ", host type is " + host.getType()); + throw new CloudRuntimeException(String.format("No resource adapter respond to DELETE_HOST event for %s, hypervisorType is %s, host type is %s", + host, host.getHypervisorType(), host.getType())); } if (answer.getIsException()) { @@ -1002,7 +1001,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, storagePool.setClusterId(null); _storagePoolDao.update(poolId, storagePool); _storagePoolDao.remove(poolId); - logger.debug(String.format("Local storage [id: %s] is removed as a part of %s removal", poolId, hostRemoved.toString())); + logger.debug("Local storage [id: {}] is removed as a part of {} removal", storagePool, hostRemoved); } } @@ -1093,9 +1092,9 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final List hosts = listAllHostsInCluster(cmd.getId()); if (hosts.size() > 0) { if (logger.isDebugEnabled()) { - logger.debug("Cluster: " + cmd.getId() + " still has hosts, can't remove"); + logger.debug("Cluster: {} still has hosts, can't remove", cluster); } - throw new CloudRuntimeException("Cluster: " + cmd.getId() + " cannot be removed. Cluster still has hosts"); + throw new CloudRuntimeException(String.format("Cluster: %s cannot be removed. 
Cluster still has hosts", cluster)); } // don't allow to remove the cluster if it has non-removed storage @@ -1103,9 +1102,9 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final List storagePools = _storagePoolDao.listPoolsByCluster(cmd.getId()); if (storagePools.size() > 0) { if (logger.isDebugEnabled()) { - logger.debug("Cluster: " + cmd.getId() + " still has storage pools, can't remove"); + logger.debug("Cluster: {} still has storage pools, can't remove", cluster); } - throw new CloudRuntimeException("Cluster: " + cmd.getId() + " cannot be removed. Cluster still has storage pools"); + throw new CloudRuntimeException(String.format("Cluster: %s cannot be removed. Cluster still has storage pools", cluster)); } if (_clusterDao.remove(cmd.getId())) { @@ -1131,7 +1130,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } catch (final CloudRuntimeException e) { throw e; } catch (final Throwable t) { - logger.error("Unable to delete cluster: " + cmd.getId(), t); + logger.error("Unable to delete cluster: {}", _clusterDao.findById(cmd.getId()), t); return false; } } @@ -1295,7 +1294,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } if (!ResourceState.isMaintenanceState(host.getResourceState())) { - throw new CloudRuntimeException("Cannot perform cancelMaintenance when resource state is " + host.getResourceState() + ", hostId = " + hostId); + throw new CloudRuntimeException(String.format("Cannot perform cancelMaintenance when resource state is %s, host: %s", host.getResourceState(), host)); } processResourceEvent(ResourceListener.EVENT_CANCEL_MAINTENANCE_BEFORE, hostId); @@ -1348,20 +1347,20 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, // for the last host in this cluster, destroy SSVM/CPVM and stop all other VMs if (VirtualMachine.Type.SecondaryStorageVm.equals(vm.getType()) || 
VirtualMachine.Type.ConsoleProxy.equals(vm.getType())) { - logger.error(String.format("Maintenance: VM is of type %s. Destroying VM %s (ID: %s) immediately instead of migration.", vm.getType().toString(), vm.getInstanceName(), vm.getUuid())); + logger.error("Maintenance: VM is of type {}. Destroying VM {} immediately instead of migration.", vm.getType(), vm); _haMgr.scheduleDestroy(vm, host.getId()); return; } - logger.error(String.format("Maintenance: No hosts available for migrations. Scheduling shutdown for VM %s instead of migration.", vm.getUuid())); + logger.error("Maintenance: No hosts available for migrations. Scheduling shutdown for VM {} instead of migration.", vm); _haMgr.scheduleStop(vm, host.getId(), WorkType.ForceStop); } private boolean doMaintain(final long hostId) { final HostVO host = _hostDao.findById(hostId); - logger.info("Maintenance: attempting maintenance of host " + host.getUuid()); + logger.info("Maintenance: attempting maintenance of host {}", host); ResourceState hostState = host.getResourceState(); if (!ResourceState.canAttemptMaintenance(hostState)) { - throw new CloudRuntimeException("Cannot perform maintain when resource state is " + hostState + ", hostId = " + hostId); + throw new CloudRuntimeException(String.format("Cannot perform maintain when resource state is %s, host = %s", hostState, host)); } final List vms = _vmDao.listByHostId(hostId); @@ -1371,7 +1370,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final MaintainAnswer answer = (MaintainAnswer)_agentMgr.easySend(hostId, new MaintainCommand()); if (answer == null || !answer.getResult()) { - logger.warn("Unable to send MaintainCommand to host: " + hostId); + logger.warn("Unable to send MaintainCommand to host: {}", host); return false; } @@ -1383,7 +1382,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, throw new CloudRuntimeException(err + e.getMessage()); } - 
ActionEventUtils.onStartedActionEvent(CallContext.current().getCallingUserId(), CallContext.current().getCallingAccountId(), EventTypes.EVENT_MAINTENANCE_PREPARE, "starting maintenance for host " + hostId, hostId, null, true, 0); + ActionEventUtils.onStartedActionEvent(CallContext.current().getCallingUserId(), CallContext.current().getCallingAccountId(), EventTypes.EVENT_MAINTENANCE_PREPARE, String.format("starting maintenance for host %s", host), hostId, null, true, 0); _agentMgr.pullAgentToMaintenance(hostId); /* TODO: move below to listener */ @@ -1394,7 +1393,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, List hosts = listAllUpAndEnabledHosts(Host.Type.Routing, host.getClusterId(), host.getPodId(), host.getDataCenterId()); if (CollectionUtils.isEmpty(hosts)) { - logger.warn("Unable to find a host for vm migration in cluster: " + host.getClusterId()); + logger.warn("Unable to find a host for vm migration in cluster: {}", _clusterDao.findById(host.getClusterId())); if (! isClusterWideMigrationPossible(host, vms, hosts)) { return false; } @@ -1421,7 +1420,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, throw new CloudRuntimeException("There are active VMs using the host's local storage pool. 
Please stop all VMs on this host that use local storage."); } } else { - logger.info("Maintenance: scheduling migration of VM " + vm.getUuid() + " from host " + host.getUuid()); + logger.info("Maintenance: scheduling migration of VM {} from host {}", vm, host); _haMgr.scheduleMigration(vm); } } @@ -1431,7 +1430,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, private boolean isClusterWideMigrationPossible(Host host, List vms, List hosts) { if (MIGRATE_VM_ACROSS_CLUSTERS.valueIn(host.getDataCenterId())) { - logger.info("Looking for hosts across different clusters in zone: " + host.getDataCenterId()); + DataCenterVO zone = _dcDao.findById(host.getDataCenterId()); + logger.info("Looking for hosts across different clusters in zone: {}", zone); Long podId = null; for (final VMInstanceVO vm : vms) { if (VirtualMachine.systemVMs.contains(vm.getType())) { @@ -1442,7 +1442,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } hosts.addAll(listAllUpAndEnabledHosts(Host.Type.Routing, null, podId, host.getDataCenterId())); if (CollectionUtils.isEmpty(hosts)) { - logger.warn("Unable to find a host for vm migration in zone: " + host.getDataCenterId()); + logger.warn("Unable to find a host for vm migration in zone: {}", zone); return false; } logger.info("Found hosts in the zone for vm migration: " + hosts); @@ -1476,17 +1476,15 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, try { dest = deploymentManager.planDeployment(profile, plan, new DeploymentPlanner.ExcludeList(), null); } catch (InsufficientServerCapacityException e) { - throw new CloudRuntimeException(String.format("Maintenance failed, could not find deployment destination for VM [id=%s, name=%s].", vm.getId(), vm.getInstanceName()), - e); + throw new CloudRuntimeException(String.format("Maintenance failed, could not find deployment destination for VM: %s.", vm), e); } Host destHost = dest.getHost(); try { 
_vmMgr.migrateWithStorage(vm.getUuid(), host.getId(), destHost.getId(), null); } catch (ResourceUnavailableException e) { - throw new CloudRuntimeException( - String.format("Maintenance failed, could not migrate VM [id=%s, name=%s] with local storage from host [id=%s, name=%s] to host [id=%s, name=%s].", vm.getId(), - vm.getInstanceName(), host.getId(), host.getName(), destHost.getId(), destHost.getName()), e); + throw new CloudRuntimeException(String.format("Maintenance failed, could not migrate VM (%s) with local storage from host (%s) to host (%s).", + vm, host, destHost), e); } } @@ -1525,8 +1523,9 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } if (_hostDao.countBy(host.getClusterId(), ResourceState.PrepareForMaintenance, ResourceState.ErrorInPrepareForMaintenance) > 0) { - throw new CloudRuntimeException("There are other servers attempting migrations for maintenance. " + - "Found hosts in PrepareForMaintenance OR ErrorInPrepareForMaintenance STATUS in cluster " + host.getClusterId()); + throw new CloudRuntimeException(String.format("There are other servers attempting migrations for maintenance. 
" + + "Found hosts in PrepareForMaintenance OR ErrorInPrepareForMaintenance STATUS in cluster %s", + _clusterDao.findById(host.getClusterId()))); } if (_storageMgr.isLocalStorageActiveOnHost(host.getId())) { @@ -1555,10 +1554,10 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, processResourceEvent(ResourceListener.EVENT_PREPARE_MAINTENANCE_AFTER, hostId); return _hostDao.findById(hostId); } else { - throw new CloudRuntimeException("Unable to prepare for maintenance host " + hostId); + throw new CloudRuntimeException(String.format("Unable to prepare for maintenance host %s", host)); } } catch (final AgentUnavailableException e) { - throw new CloudRuntimeException("Unable to prepare for maintenance host " + hostId); + throw new CloudRuntimeException(String.format("Unable to prepare for maintenance host %s", host)); } } @@ -1598,28 +1597,27 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, if (host == null || StringUtils.isBlank(host.getName())) { throw new InvalidParameterValueException(String.format("Host [id:%s] does not exist.", hostId)); } else if (host.getRemoved() != null){ - throw new InvalidParameterValueException(String.format("Host [id:%s, name:%s] does not exist or it has been removed.", hostId, host.getName())); + throw new InvalidParameterValueException(String.format("Host [id:%s, uuid: %s, name:%s] does not exist or it has been removed.", hostId, host.getUuid(), host.getName())); } if (host.getResourceState() == ResourceState.Degraded) { - throw new NoTransitionException(String.format("Host [id:%s] was already marked as Degraded.", host.getId())); + throw new NoTransitionException(String.format("Host (%s) was already marked as Degraded.", host)); } if (host.getStatus() != Status.Alert && host.getStatus() != Status.Disconnected) { - throw new InvalidParameterValueException( - String.format("Cannot perform declare host [id=%s, name=%s] as 'Degraded' when host is in %s status", 
host.getId(), host.getName(), host.getStatus())); + throw new InvalidParameterValueException(String.format("Cannot perform declare host (%s) as 'Degraded' when host is in %s status", host, host.getStatus())); } try { resourceStateTransitTo(host, ResourceState.Event.DeclareHostDegraded, _nodeId); host.setResourceState(ResourceState.Degraded); } catch (NoTransitionException e) { - logger.error(String.format("Cannot transmit host [id:%s, name:%s, state:%s, status:%s] to %s state", host.getId(), host.getName(), host.getState(), host.getStatus(), - ResourceState.Event.DeclareHostDegraded), e); + logger.error("Cannot transmit host [id:{}, uuid: {}, name:{}, state:{}, status:{}] to {} state", + host.getId(), host.getUuid(), host.getName(), host.getState(), host.getStatus(), ResourceState.Event.DeclareHostDegraded, e); throw e; } - scheduleVmsRestart(hostId); + scheduleVmsRestart(host); return host; } @@ -1627,13 +1625,13 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, /** * This method assumes that the host is Degraded; therefore it schedule VMs to be re-started by the HA manager. */ - private void scheduleVmsRestart(Long hostId) { - List allVmsOnHost = _vmDao.listByHostId(hostId); + private void scheduleVmsRestart(Host host) { + List allVmsOnHost = _vmDao.listByHostId(host.getId()); if (CollectionUtils.isEmpty(allVmsOnHost)) { - logger.debug(String.format("Host [id=%s] was marked as Degraded with no allocated VMs, no need to schedule VM restart", hostId)); + logger.debug("Host ({}) was marked as Degraded with no allocated VMs, no need to schedule VM restart", host); } - logger.debug(String.format("Host [id=%s] was marked as Degraded with a total of %s allocated VMs. Triggering HA to start VMs that have HA enabled.", hostId, allVmsOnHost.size())); + logger.debug("Host ({}) was marked as Degraded with a total of {} allocated VMs. 
Triggering HA to start VMs that have HA enabled.", host, allVmsOnHost.size()); for (VMInstanceVO vm : allVmsOnHost) { State vmState = vm.getState(); if (vmState == State.Starting || vmState == State.Running || vmState == State.Stopping) { @@ -1651,12 +1649,12 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, HostVO host = _hostDao.findById(hostId); if (host == null || host.getRemoved() != null) { - throw new InvalidParameterValueException(String.format("Host [id=%s] does not exist", host.getId())); + throw new InvalidParameterValueException(String.format("Host (%s) with id %d does not exist", host, hostId)); } if (host.getResourceState() != ResourceState.Degraded) { throw new NoTransitionException( - String.format("Cannot perform cancelHostAsDegraded on host [id=%s, name=%s] when host is in %s state", host.getId(), host.getName(), host.getResourceState())); + String.format("Cannot perform cancelHostAsDegraded on host (%s) when host is in %s state", host, host.getResourceState())); } try { @@ -1664,7 +1662,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, host.setResourceState(ResourceState.Enabled); } catch (NoTransitionException e) { throw new NoTransitionException( - String.format("Cannot transmit host [id=%s, name=%s, state=%s, status=%s] to %s state", host.getId(), host.getName(), host.getResourceState(), host.getStatus(), + String.format("Cannot transmit host [id=%s, uuid=%s, name=%s, state=%s, status=%s] to %s state", host.getId(), host.getUuid(), host.getName(), host.getResourceState(), host.getStatus(), ResourceState.Enabled)); } return host; @@ -1698,11 +1696,11 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, * Safely transit host into Maintenance mode */ protected boolean setHostIntoMaintenance(HostVO host) throws NoTransitionException { - logger.debug("Host " + host.getUuid() + " entering in Maintenance"); + logger.debug("Host {} entering in 
Maintenance", host); resourceStateTransitTo(host, ResourceState.Event.InternalEnterMaintenance, _nodeId); ActionEventUtils.onCompletedActionEvent(CallContext.current().getCallingUserId(), CallContext.current().getCallingAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_MAINTENANCE_PREPARE, - "completed maintenance for host " + host.getId(), host.getId(), null, 0); + String.format("completed maintenance for host %s", host), host.getId(), null, 0); return true; } @@ -1712,7 +1710,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, * - Configure VNC access for VMs (KVM hosts only) */ protected boolean setHostIntoErrorInMaintenance(HostVO host, List errorVms) throws NoTransitionException { - logger.debug("Unable to migrate / fix errors for " + errorVms.size() + " VM(s) from host " + host.getUuid()); + logger.debug("Unable to migrate / fix errors for {} VM(s) from host {}", errorVms.size(), host); _haMgr.cancelScheduledMigrations(host); configureVncAccessForKVMHostFailedMigrations(host, errorVms); resourceStateTransitTo(host, ResourceState.Event.UnableToMaintain, _nodeId); @@ -1720,14 +1718,14 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } protected boolean setHostIntoErrorInPrepareForMaintenance(HostVO host, List errorVms) throws NoTransitionException { - logger.debug("Host " + host.getUuid() + " entering in PrepareForMaintenanceWithErrors state"); + logger.debug("Host {} entering in PrepareForMaintenanceWithErrors state", host); configureVncAccessForKVMHostFailedMigrations(host, errorVms); resourceStateTransitTo(host, ResourceState.Event.UnableToMigrate, _nodeId); return false; } protected boolean setHostIntoPrepareForMaintenanceAfterErrorsFixed(HostVO host) throws NoTransitionException { - logger.debug("Host " + host.getUuid() + " entering in PrepareForMaintenance state as any previous corrections have been fixed"); + logger.debug("Host {} entering in PrepareForMaintenance state as any previous 
corrections have been fixed", host); resourceStateTransitTo(host, ResourceState.Event.ErrorsCorrected, _nodeId); return false; } @@ -2757,8 +2755,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final ClusterVO clusterVO = _clusterDao.findById(host.getClusterId()); if (clusterVO.getHypervisorType() != hyType) { - throw new IllegalArgumentException("Can't add host whose hypervisor type is: " + hyType + " into cluster: " + clusterVO.getId() + - " whose hypervisor type is: " + clusterVO.getHypervisorType()); + throw new IllegalArgumentException(String.format("Can't add host whose hypervisor type is: %s into cluster: %s whose hypervisor type is: %s", + hyType, clusterVO, clusterVO.getHypervisorType())); } CPU.CPUArch hostCpuArch = CPU.CPUArch.fromType(ssCmd.getCpuArch()); if (hostCpuArch != null && clusterVO.getArch() != null && hostCpuArch != clusterVO.getArch()) { @@ -2797,7 +2795,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, @Override public void deleteRoutingHost(final HostVO host, final boolean isForced, final boolean forceDestroyStorage) throws UnableDeleteHostException { if (host.getType() != Host.Type.Routing) { - throw new CloudRuntimeException(String.format("Non-Routing host gets in deleteRoutingHost, id is %s", host.getId())); + throw new CloudRuntimeException(String.format("Non-Routing host (%s) gets in deleteRoutingHost", host)); } if (logger.isDebugEnabled()) { @@ -2842,7 +2840,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, try { resourceStateTransitTo(host, ResourceState.Event.DeleteHost, host.getId()); } catch (final NoTransitionException e) { - logger.debug("Cannot transmit host " + host.getId() + " to Disabled state", e); + logger.debug("Cannot transmit host {} to Disabled state", host, e); } for (final VMInstanceVO vm : vms) { if ((!HighAvailabilityManager.ForceHA.value() && !vm.isHaEnabled()) || vm.getState() == State.Stopping) { @@ 
-2880,7 +2878,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, * really prefer to exception that always exposes bugs */ if (!ResourceState.isMaintenanceState(host.getResourceState())) { - throw new CloudRuntimeException("Cannot perform cancelMaintenance when resource state is " + host.getResourceState() + ", hostId = " + hostId); + throw new CloudRuntimeException(String.format("Cannot perform cancelMaintenance when resource state is %s, host = %s", host.getResourceState(), host)); } /* TODO: move to listener */ @@ -2890,7 +2888,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final List vms = _haMgr.findTakenMigrationWork(); for (final VMInstanceVO vm : vms) { if (vm.getHostId() != null && vm.getHostId() == hostId) { - logger.warn("Unable to cancel migration because the vm is being migrated: " + vm + ", hostId = " + hostId); + logger.warn("Unable to cancel migration because the vm is being migrated: {}, host {}", vm, host); vms_migrating = true; } } @@ -3141,8 +3139,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, return false; } else { try { - logger.warn("Migration of VM " + _vmDao.findById(vmId) + " failed from host " + _hostDao.findById(hostId) + - ". Emitting event UnableToMigrate."); + logger.warn("Migration of VM {} failed from host {}. 
Emitting event UnableToMigrate.", _vmDao.findById(vmId), host); return resourceStateTransitTo(host, ResourceState.Event.UnableToMigrate, _nodeId); } catch (final NoTransitionException e) { logger.debug(String.format("No next resource state for %s while current state is [%s] with event %s", host, host.getResourceState(), ResourceState.Event.UnableToMigrate), e); @@ -3320,14 +3317,15 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, @Override public HostStats getHostStatistics(final long hostId) { - final Answer answer = _agentMgr.easySend(hostId, new GetHostStatsCommand(_hostDao.findById(hostId).getGuid(), _hostDao.findById(hostId).getName(), hostId)); + HostVO host = _hostDao.findById(hostId); + final Answer answer = _agentMgr.easySend(hostId, new GetHostStatsCommand(host.getGuid(), host.getName(), hostId)); if (answer != null && answer instanceof UnsupportedAnswer) { return null; } if (answer == null || !answer.getResult()) { - final String msg = "Unable to obtain host " + hostId + " statistics. "; + final String msg = String.format("Unable to obtain host %s statistics. 
", host); logger.warn(msg); return null; } else { @@ -3453,12 +3451,12 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } @Override - public boolean isGPUDeviceAvailable(final long hostId, final String groupName, final String vgpuType) { - if(!listAvailableGPUDevice(hostId, groupName, vgpuType).isEmpty()) { + public boolean isGPUDeviceAvailable(final Host host, final String groupName, final String vgpuType) { + if(!listAvailableGPUDevice(host.getId(), groupName, vgpuType).isEmpty()) { return true; } else { if (logger.isDebugEnabled()) { - logger.debug("Host ID: "+ hostId +" does not have GPU device available"); + logger.debug("Host: {} does not have GPU device available", host); } return false; } @@ -3469,7 +3467,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final List gpuDeviceList = listAvailableGPUDevice(hostId, groupName, vgpuType); if (CollectionUtils.isEmpty(gpuDeviceList)) { - final String errorMsg = "Host " + hostId + " does not have required GPU device or out of capacity. GPU group: " + groupName + ", vGPU Type: " + vgpuType; + final String errorMsg = String.format("Host %s does not have required GPU device or out of capacity. GPU group: %s, vGPU Type: %s", _hostDao.findById(hostId), groupName, vgpuType); logger.error(errorMsg); throw new CloudRuntimeException(errorMsg); } @@ -3540,7 +3538,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final PlannerHostReservationVO hostReservation = _plannerHostReserveDao.lockRow(id, true); if (hostReservation == null) { if (logger.isDebugEnabled()) { - logger.debug("Host reservation for host: " + hostId + " does not even exist. Release reservartion call is ignored."); + logger.debug("Host reservation for host: {} does not even exist. 
Release reservation call is ignored.", () -> _hostDao.findById(hostId)); } return false; } @@ -3550,7 +3548,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } if (logger.isDebugEnabled()) { - logger.debug("Host reservation for host: " + hostId + " does not even exist. Release reservartion call is ignored."); + logger.debug("Host reservation for host: {} does not even exist. Release reservation call is ignored.", () -> _hostDao.findById(hostId)); } return false; @@ -3559,7 +3557,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } catch (final CloudRuntimeException e) { throw e; } catch (final Throwable t) { - logger.error("Unable to release host reservation for host: " + hostId, t); + logger.error("Unable to release host reservation for host: {}", _hostDao.findById(hostId), t); return false; } } diff --git a/server/src/main/java/com/cloud/resource/RollingMaintenanceManagerImpl.java b/server/src/main/java/com/cloud/resource/RollingMaintenanceManagerImpl.java index c7bdf9c6f6c..505516d107b 100644 --- a/server/src/main/java/com/cloud/resource/RollingMaintenanceManagerImpl.java +++ b/server/src/main/java/com/cloud/resource/RollingMaintenanceManagerImpl.java @@ -643,7 +643,7 @@ public class RollingMaintenanceManagerImpl extends ManagerBase implements Rollin ClusterDetailsVO clusterDetailsRamOvercommmt = clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio"); Float cpuOvercommitRatio = Float.parseFloat(clusterDetailsCpuOvercommit.getValue()); Float memoryOvercommitRatio = Float.parseFloat(clusterDetailsRamOvercommmt.getValue()); - boolean hostHasCapacity = capacityManager.checkIfHostHasCapacity(hostInCluster.getId(), cpuRequested, ramRequested, false, + boolean hostHasCapacity = capacityManager.checkIfHostHasCapacity(hostInCluster, cpuRequested, ramRequested, false, cpuOvercommitRatio, memoryOvercommitRatio, false); if (!maxGuestLimit && hostHasCPUCapacity && hostHasCapacity) { 
canMigrateVm = true; diff --git a/server/src/main/java/com/cloud/resourceicon/ResourceIconManagerImpl.java b/server/src/main/java/com/cloud/resourceicon/ResourceIconManagerImpl.java index 943c68c7c8d..6c286edd00d 100644 --- a/server/src/main/java/com/cloud/resourceicon/ResourceIconManagerImpl.java +++ b/server/src/main/java/com/cloud/resourceicon/ResourceIconManagerImpl.java @@ -181,7 +181,7 @@ public class ResourceIconManagerImpl extends ManagerBase implements ResourceIcon Pair accountDomainPair = getAccountDomain(id, resourceType); Long domainId = accountDomainPair.second(); Long accountId = accountDomainPair.first(); - resourceManagerUtil.checkResourceAccessible(accountId, domainId, String.format("Account ' %s ' doesn't have permissions to upload icon for resource ' %s ", caller, id)); + resourceManagerUtil.checkResourceAccessible(accountId, domainId, String.format("Account ' %s ' doesn't have permissions to upload icon for resource [id: %s, uuid: %s] ", caller, id, resourceUuid)); if (existingResourceIcon == null) { resourceIcon = new ResourceIconVO(id, resourceType, resourceUuid, base64Image); @@ -221,7 +221,7 @@ public class ResourceIconManagerImpl extends ManagerBase implements ResourceIcon Pair accountDomainPair = getAccountDomain(id, resourceType); Long domainId = accountDomainPair.second(); Long accountId = accountDomainPair.first(); - resourceManagerUtil.checkResourceAccessible(accountId, domainId, String.format("Account ' %s ' doesn't have permissions to upload icon for resource ' %s ", caller, id)); + resourceManagerUtil.checkResourceAccessible(accountId, domainId, String.format("Account ' %s ' doesn't have permissions to upload icon for resource [id: %s, uuid: %s]", caller, id, resourceId)); resourceIconDao.remove(resourceIcon.getId()); logger.debug("Removed icon for resources (" + String.join(", ", resourceIds) + ")"); diff --git a/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java 
b/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java index b59ddc029ee..f37b661c22f 100644 --- a/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java +++ b/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java @@ -932,7 +932,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim if ((caller.getAccountId() == accountId.longValue()) && (_accountMgr.isDomainAdmin(caller.getId()) || caller.getType() == Account.Type.RESOURCE_DOMAIN_ADMIN)) { // If the admin is trying to update their own account, disallow. - throw new PermissionDeniedException("Unable to update resource limit for their own account " + accountId + ", permission denied"); + throw new PermissionDeniedException(String.format("Unable to update resource limit for their own account %s, permission denied", account)); } if (account.getType() == Account.Type.PROJECT) { @@ -976,8 +976,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim DomainVO parentDomain = _domainDao.findById(parentDomainId); long parentMaximum = findCorrectResourceLimitForDomain(parentDomain, resourceType, tag); if ((parentMaximum >= 0) && (max.longValue() > parentMaximum)) { - throw new InvalidParameterValueException("Domain " + domain.getName() + "(id: " + parentDomain.getId() + ") has maximum allowed resource limit " + parentMaximum + " for " - + resourceType + ", please specify a value less than or equal to " + parentMaximum); + throw new InvalidParameterValueException(String.format("Domain %s has maximum allowed resource limit %d for %s, please specify a value less than or equal to %d", parentDomain, parentMaximum, resourceType, parentMaximum)); } } ownerType = ResourceOwnerType.Domain; @@ -1012,7 +1011,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim "host tags: %s, storage tags: %s", StringUtils.join(hostTags), StringUtils.join(storageTags)); if 
(ObjectUtils.allNotNull(ownerId, ownerType)) { - msg = String.format("%s for %s ID: %d", msg, ownerType.getName().toLowerCase(), ownerId); + msg = String.format("%s for %s", msg, ownerType == ResourceOwnerType.Account ? _accountDao.findById(ownerId) : _domainDao.findById(ownerId)); } logger.debug(msg); } diff --git a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java index 7926498c123..0c836d7347d 100644 --- a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java +++ b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java @@ -1395,7 +1395,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio for (ResourceType resourceType : resourceTypes) { if (!domainCountStr.contains(resourceType.toString())) { ResourceCountVO resourceCountVO = new ResourceCountVO(resourceType, 0, domain.getId(), ResourceOwnerType.Domain); - logger.debug("Inserting resource count of type " + resourceType + " for domain id=" + domain.getId()); + logger.debug("Inserting resource count of type {} for domain {}", resourceType, domain); _resourceCountDao.persist(resourceCountVO); } } @@ -1424,7 +1424,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio for (ResourceType resourceType : resourceTypes) { if (!accountCountStr.contains(resourceType.toString())) { ResourceCountVO resourceCountVO = new ResourceCountVO(resourceType, 0, account.getId(), ResourceOwnerType.Account); - logger.debug("Inserting resource count of type " + resourceType + " for account id=" + account.getId()); + logger.debug("Inserting resource count of type {} for account {}", resourceType, account); _resourceCountDao.persist(resourceCountVO); } } diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index 2062ee1e94d..76d2943e18c 100644 --- 
a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -2521,7 +2521,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe freeAddrs.addAll(_ipAddressMgr.listAvailablePublicIps(dcId, null, vlanDbIds, owner, VlanType.VirtualNetwork, associatedNetworkId, false, false, false, null, null, false, cmd.getVpcId(), cmd.isDisplay(), false, false)); // Free } catch (InsufficientAddressCapacityException e) { - logger.warn("no free address is found in zone " + dcId); + logger.warn("no free address is found in zone {}", dc); } } for (IPAddressVO addr: freeAddrs) { @@ -3056,8 +3056,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } - protected ConsoleProxyInfo getConsoleProxyForVm(final long dataCenterId, final long userVmId) { - return _consoleProxyMgr.assignProxy(dataCenterId, userVmId); + protected ConsoleProxyInfo getConsoleProxyForVm(final long dataCenterId, final VMInstanceVO userVm) { + return _consoleProxyMgr.assignProxy(dataCenterId, userVm); } private ConsoleProxyVO startConsoleProxy(final long instanceId) { @@ -3092,7 +3092,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe public String getConsoleAccessUrlRoot(final long vmId) { final VMInstanceVO vm = _vmInstanceDao.findById(vmId); if (vm != null) { - final ConsoleProxyInfo proxy = getConsoleProxyForVm(vm.getDataCenterId(), vmId); + final ConsoleProxyInfo proxy = getConsoleProxyForVm(vm.getDataCenterId(), vm); if (proxy != null) { return proxy.getProxyImageUrl(); } @@ -3106,7 +3106,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (vm == null) { return new Pair<>(false, "Cannot find a VM with id = " + vmId); } - final ConsoleProxyInfo proxy = getConsoleProxyForVm(vm.getDataCenterId(), vmId); + final ConsoleProxyInfo proxy = getConsoleProxyForVm(vm.getDataCenterId(), vm); if (proxy == null) { 
return new Pair<>(false, "Cannot find a console proxy for the VM " + vmId); } @@ -3137,7 +3137,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe public String getConsoleAccessAddress(long vmId) { final VMInstanceVO vm = _vmInstanceDao.findById(vmId); if (vm != null) { - final ConsoleProxyInfo proxy = getConsoleProxyForVm(vm.getDataCenterId(), vmId); + final ConsoleProxyInfo proxy = getConsoleProxyForVm(vm.getDataCenterId(), vm); return proxy != null ? proxy.getProxyAddress() : null; } return null; @@ -5028,8 +5028,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe String password = vm.getDetail("Encrypted.Password"); if (StringUtils.isEmpty(password)) { - throw new InvalidParameterValueException(String.format("No password found for VM with id [%s]. When the VM's SSH keypair is changed, the current encrypted password is " - + "removed due to incosistency in the encryptation, as the new SSH keypair is different from which the password was encrypted. To get a new password, it must be reseted.", vmId)); + throw new InvalidParameterValueException(String.format("No password found for VM [%s]. When the VM's SSH keypair is changed, the current encrypted password is " + + "removed due to inconsistency in the encryption, as the new SSH keypair is different from which the password was encrypted. 
To get a new password, it must be reset.", vm)); } return password; @@ -5047,19 +5047,19 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) { - for (final HostVO h : hosts) { + for (final HostVO host : hosts) { if (logger.isDebugEnabled()) { - logger.debug("Changing password for host name = " + h.getName()); + logger.debug("Changing password for host {}", host); } // update password for this host - final DetailVO nv = _detailsDao.findDetail(h.getId(), ApiConstants.USERNAME); + final DetailVO nv = _detailsDao.findDetail(host.getId(), ApiConstants.USERNAME); if (nv == null) { - final DetailVO nvu = new DetailVO(h.getId(), ApiConstants.USERNAME, userNameWithoutSpaces); + final DetailVO nvu = new DetailVO(host.getId(), ApiConstants.USERNAME, userNameWithoutSpaces); _detailsDao.persist(nvu); - final DetailVO nvp = new DetailVO(h.getId(), ApiConstants.PASSWORD, DBEncryptionUtil.encrypt(command.getPassword())); + final DetailVO nvp = new DetailVO(host.getId(), ApiConstants.PASSWORD, DBEncryptionUtil.encrypt(command.getPassword())); _detailsDao.persist(nvp); } else if (nv.getValue().equals(userNameWithoutSpaces)) { - final DetailVO nvp = _detailsDao.findDetail(h.getId(), ApiConstants.PASSWORD); + final DetailVO nvp = _detailsDao.findDetail(host.getId(), ApiConstants.PASSWORD); nvp.setValue(DBEncryptionUtil.encrypt(command.getPassword())); _detailsDao.persist(nvp); } else { @@ -5116,7 +5116,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Override public void doInTransactionWithoutResult(final TransactionStatus status) { if (logger.isDebugEnabled()) { - logger.debug("Changing password for host name = " + host.getName()); + logger.debug("Changing password for host {}", host); } // update password for this host final DetailVO nv = _detailsDao.findDetail(host.getId(), 
ApiConstants.USERNAME); diff --git a/server/src/main/java/com/cloud/server/StatsCollector.java b/server/src/main/java/com/cloud/server/StatsCollector.java index 939781f39f7..a5f91b1b3f3 100644 --- a/server/src/main/java/com/cloud/server/StatsCollector.java +++ b/server/src/main/java/com/cloud/server/StatsCollector.java @@ -656,7 +656,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc metrics.put(hostStatsEntry.getHostId(), hostStatsEntry); _hostStats.put(host.getId(), hostStatsEntry); } else { - logger.warn("The Host stats is null for host: " + host.getId()); + logger.warn("The Host stats is null for host: {}", host); } } @@ -1251,7 +1251,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc metrics.clear(); } } catch (Exception e) { - logger.debug("Failed to get VM stats for host with ID: " + host.getId()); + logger.debug("Failed to get VM stats for host: {}", host); continue; } } @@ -1471,8 +1471,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc } if (vmDiskStat_lock == null) { - logger.warn("unable to find vm disk stats from host for account: " + vm.getAccountId() + " with vmId: " + vm.getId() - + " and volumeId:" + volume.getId()); + logger.warn("unable to find vm disk stats from host for account: {} with vm: {} and volume: {}", vm.getAccountId(), vm, volume); continue; } @@ -1518,7 +1517,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc } }); } catch (Exception e) { - logger.warn(String.format("Error while collecting vm disk stats from host %s : ", host.getName()), e); + logger.warn("Error while collecting vm disk stats from host {} : ", host, e); } } } @@ -1560,8 +1559,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc logger.debug("Cannot find uservm with id: " + vmId + " , continue"); continue; } - logger.debug("Now we are updating the user_statistics table for VM: " + 
userVm.getInstanceName() - + " after collecting vm network statistics from host: " + host.getName()); + logger.debug("Now we are updating the user_statistics table for VM: {} after collecting vm network statistics from host: {}", userVm, host); for (VmNetworkStats vmNetworkStat : vmNetworkStats) { VmNetworkStatsEntry vmNetworkStatEntry = (VmNetworkStatsEntry)vmNetworkStat; SearchCriteria sc_nic = _nicDao.createSearchCriteria(); @@ -1586,8 +1584,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc } if (vmNetworkStat_lock == null) { - logger.warn("unable to find vm network stats from host for account: " + userVm.getAccountId() + " with vmId: " + userVm.getId() - + " and nicId:" + nic.getId()); + logger.warn("unable to find vm network stats from host for account: {} with vm: {} and nic: {}", userVm.getAccountId(), userVm, nic); continue; } @@ -1623,7 +1620,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc } }); } catch (Exception e) { - logger.warn(String.format("Error while collecting vm network stats from host %s : ", host.getName()), e); + logger.warn("Error while collecting vm network stats from host {} : ", host, e); } } } @@ -1709,7 +1706,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc Answer answer = ssAhost.sendMessage(command); if (answer != null && answer.getResult()) { storageStats.put(storeId, (StorageStats)answer); - logger.trace("HostId: " + storeId + " Used: " + toHumanReadableSize(((StorageStats)answer).getByteUsed()) + " Total Available: " + toHumanReadableSize(((StorageStats)answer).getCapacityBytes())); + logger.trace("Store: {} Used: {} Total Available: {}", store, toHumanReadableSize(((StorageStats) answer).getByteUsed()), toHumanReadableSize(((StorageStats) answer).getCapacityBytes())); } } updateStorageStats(storageStats); @@ -1738,8 +1735,8 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc 
pool.setCapacityBytes(capacityBytes); poolNeedsUpdating = true; } else { - logger.warn("Not setting capacity bytes, received {} capacity for pool ID {}", - NumbersUtil.toReadableSize(((StorageStats)answer).getCapacityBytes()), poolId); + logger.warn("Not setting capacity bytes, received {} capacity for pool {}", + NumbersUtil.toReadableSize(((StorageStats)answer).getCapacityBytes()), pool); } } if (((_storagePoolStats.get(poolId) != null && _storagePoolStats.get(poolId).getByteUsed() != usedBytes) @@ -1859,12 +1856,12 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc String readableTotalCapacity = NumbersUtil.toReadableSize((long) totalCapacity); String readableUsedCapacity = NumbersUtil.toReadableSize((long) usedCapacity); - logger.printf(Level.DEBUG, "Verifying image storage [%s]. Capacity: total=[%s], used=[%s], threshold=[%.2f%%].", imageStoreId, readableTotalCapacity, readableUsedCapacity, threshold * 100); + logger.printf(Level.DEBUG, "Verifying image storage [%s]. Capacity: total=[%s], used=[%s], threshold=[%.2f%%].", imageStore, readableTotalCapacity, readableUsedCapacity, threshold * 100); if (usedCapacity / totalCapacity <= threshold) { return true; } - logger.printf(Level.WARN, "Image storage [%s] has not enough capacity. Capacity: total=[%s], used=[%s], threshold=[%.2f%%].", imageStoreId, readableTotalCapacity, readableUsedCapacity, threshold * 100); + logger.printf(Level.WARN, "Image storage [%s] has not enough capacity. Capacity: total=[%s], used=[%s], threshold=[%.2f%%].", imageStore, readableTotalCapacity, readableUsedCapacity, threshold * 100); return false; } @@ -1891,7 +1888,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc * Sends VMs metrics to the configured graphite host. 
*/ protected void sendVmMetricsToGraphiteHost(Map metrics, HostVO host) { - logger.debug(String.format("Sending VmStats of host %s to %s host %s:%s", host.getId(), externalStatsType, externalStatsHost, externalStatsPort)); + logger.debug("Sending VmStats of host {} to {} host {}:{}", host, externalStatsType, externalStatsHost, externalStatsPort); try { GraphiteClient g = new GraphiteClient(externalStatsHost, externalStatsPort); g.sendMetrics(metrics); diff --git a/server/src/main/java/com/cloud/servlet/ConsoleProxyServlet.java b/server/src/main/java/com/cloud/servlet/ConsoleProxyServlet.java index ad884a33406..2b786a8f1ef 100644 --- a/server/src/main/java/com/cloud/servlet/ConsoleProxyServlet.java +++ b/server/src/main/java/com/cloud/servlet/ConsoleProxyServlet.java @@ -209,14 +209,14 @@ public class ConsoleProxyServlet extends HttpServlet { } if (vm.getHostId() == null) { - LOGGER.warn("VM " + vmId + " lost host info, sending blank response for thumbnail request"); + LOGGER.warn("VM {} lost host info, sending blank response for thumbnail request", vm); sendResponse(resp, ""); return; } HostVO host = _ms.getHostBy(vm.getHostId()); if (host == null) { - LOGGER.warn("VM " + vmId + "'s host does not exist, sending blank response for thumbnail request"); + LOGGER.warn("VM {}'s host does not exist, sending blank response for thumbnail request", vm); sendResponse(resp, ""); return; } @@ -263,14 +263,14 @@ public class ConsoleProxyServlet extends HttpServlet { } if (vm.getHostId() == null) { - LOGGER.warn("VM " + vmId + " lost host info, failed response for authentication request from console proxy"); + LOGGER.warn("VM {} lost host info, failed response for authentication request from console proxy", vm); sendResponse(resp, "failed"); return; } HostVO host = _ms.getHostBy(vm.getHostId()); if (host == null) { - LOGGER.warn("VM " + vmId + "'s host does not exist, sending failed response for authentication request from console proxy"); + LOGGER.warn("VM {}'s host does not 
exist, sending failed response for authentication request from console proxy", vm); sendResponse(resp, "failed"); return; } @@ -434,14 +434,17 @@ public class ConsoleProxyServlet extends HttpServlet { } catch (PermissionDeniedException ex) { if (_accountMgr.isNormalUser(accountObj.getId())) { if (LOGGER.isDebugEnabled()) { - LOGGER.debug("VM access is denied. VM owner account " + vm.getAccountId() + " does not match the account id in session " + - accountObj.getId() + " and caller is a normal user"); + LOGGER.debug("VM access is denied. VM owner account {} does not " + + "match the account id in session {} and caller is a normal user", + _accountMgr.getAccount(vm.getAccountId()), accountObj); } } else if (_accountMgr.isDomainAdmin(accountObj.getId()) || accountObj.getType() == Account.Type.READ_ONLY_ADMIN) { if(LOGGER.isDebugEnabled()) { - LOGGER.debug("VM access is denied. VM owner account " + vm.getAccountId() - + " does not match the account id in session " + accountObj.getId() + " and the domain-admin caller does not manage the target domain"); + LOGGER.debug("VM access is denied. 
VM owner account {} does not " + + "match the account id in session {} and the domain-admin caller " + + "does not manage the target domain", + _accountMgr.getAccount(vm.getAccountId()), accountObj); } } return false; @@ -479,7 +482,7 @@ public class ConsoleProxyServlet extends HttpServlet { if ((user == null) || (user.getRemoved() != null) || !user.getState().equals(Account.State.ENABLED) || (account == null) || !account.getState().equals(Account.State.ENABLED)) { - LOGGER.warn("Deleted/Disabled/Locked user with id=" + userId + " attempting to access public API"); + LOGGER.warn("Deleted/Disabled/Locked user ({}) with id={} attempting to access public API", user, userId); return false; } return true; @@ -545,15 +548,14 @@ public class ConsoleProxyServlet extends HttpServlet { Account account = userAcctPair.second(); if (!user.getState().equals(Account.State.ENABLED) || !account.getState().equals(Account.State.ENABLED)) { - LOGGER.debug("disabled or locked user accessing the api, userid = " + user.getId() + "; name = " + user.getUsername() + "; state: " + user.getState() + - "; accountState: " + account.getState()); + LOGGER.debug("disabled or locked user accessing the api, user: {}; state: {}; accountState: {}", user, user.getState(), account.getState()); return false; } // verify secret key exists secretKey = user.getSecretKey(); if (secretKey == null) { - LOGGER.debug("User does not have a secret key associated with the account -- ignoring request, username: " + user.getUsername()); + LOGGER.debug("User does not have a secret key associated with the account -- ignoring request, user: {}", user); return false; } diff --git a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java index 2a6494cffcd..67f2e0ab7a4 100644 --- a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java @@ -123,17 +123,17 @@ public 
class ImageStoreServiceImpl extends ManagerBase implements ImageStoreServ continue; } if (store.isReadonly()) { - logger.warn("Secondary storage: "+ id + " cannot be considered for migration as has read-only permission, Skipping it... "); + logger.warn("Secondary storage: {} cannot be considered for migration as has read-only permission, Skipping it... ", store); continue; } if (!store.getProviderName().equals(DataStoreProvider.NFS_IMAGE)) { - logger.warn("Destination image store : " + store.getName() + " not NFS based. Store not suitable for migration!"); + logger.warn("Destination image store : {} not NFS based. Store not suitable for migration!", store); continue; } if (srcStoreDcId != null && store.getDataCenterId() != null && !srcStoreDcId.equals(store.getDataCenterId())) { - logger.warn("Source and destination stores are not in the same zone. Skipping destination store: " + store.getName()); + logger.warn("Source and destination stores are not in the same zone. Skipping destination store: {}", store); continue; } diff --git a/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java b/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java index c356a62c627..334e9f10835 100755 --- a/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java +++ b/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java @@ -219,7 +219,7 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto DataStore dataStore = storeMgr.getDataStore(volumeDataStore.getDataStoreId(), DataStoreRole.Image); EndPoint ep = _epSelector.select(dataStore, volumeDataStore.getExtractUrl()); if (ep == null) { - logger.warn("There is no secondary storage VM for image store " + dataStore.getName()); + logger.warn("There is no secondary storage VM for image store {}", dataStore); continue; } VolumeVO volume = _volumeDao.findById(volumeDataStore.getVolumeId()); @@ -235,11 +235,11 @@ public class 
ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto try { answer = ep.sendMessage(cmd); } catch (CloudRuntimeException e) { - logger.warn("Unable to get upload status for volume " + volume.getUuid() + ". Error details: " + e.getMessage()); + logger.warn("Unable to get upload status for volume {}. Error details: {}", volume, e.getMessage()); answer = new UploadStatusAnswer(cmd, UploadStatus.UNKNOWN, e.getMessage()); } if (answer == null || !(answer instanceof UploadStatusAnswer)) { - logger.warn("No or invalid answer corresponding to UploadStatusCommand for volume " + volumeDataStore.getVolumeId()); + logger.warn("No or invalid answer corresponding to UploadStatusCommand for volume {}", volume); continue; } handleVolumeStatusResponse((UploadStatusAnswer)answer, volume, volumeDataStore); @@ -263,7 +263,7 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto DataStore dataStore = storeMgr.getDataStore(templateDataStore.getDataStoreId(), DataStoreRole.Image); EndPoint ep = _epSelector.select(dataStore, templateDataStore.getExtractUrl()); if (ep == null) { - logger.warn("There is no secondary storage VM for image store " + dataStore.getName()); + logger.warn("There is no secondary storage VM for image store {}", dataStore); continue; } VMTemplateVO template = _templateDao.findById(templateDataStore.getTemplateId()); @@ -279,17 +279,19 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto try { answer = ep.sendMessage(cmd); } catch (CloudRuntimeException e) { - logger.warn("Unable to get upload status for template " + template.getUuid() + ". Error details: " + e.getMessage()); + logger.warn("Unable to get upload status for template {}. 
Error details: {}", template, e.getMessage()); answer = new UploadStatusAnswer(cmd, UploadStatus.UNKNOWN, e.getMessage()); } if (answer == null || !(answer instanceof UploadStatusAnswer)) { - logger.warn("No or invalid answer corresponding to UploadStatusCommand for template " + templateDataStore.getTemplateId()); + logger.warn("No or invalid answer corresponding to UploadStatusCommand for template {}", template); continue; } handleTemplateStatusResponse((UploadStatusAnswer)answer, template, templateDataStore); } } else { - String error = "Template " + template.getUuid() + " failed to upload as SSVM is either destroyed or SSVM agent not in 'Up' state"; + String error = String.format( + "Template %s failed to upload as SSVM is either destroyed or SSVM agent not in 'Up' state", + template); handleTemplateStatusResponse(new UploadStatusAnswer(cmd, UploadStatus.ERROR, error), template, templateDataStore); } } catch (Throwable th) { @@ -333,7 +335,7 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto Volume.class.getName(), tmpVolume.getUuid()); if (logger.isDebugEnabled()) { - logger.debug("Volume " + tmpVolume.getUuid() + " uploaded successfully"); + logger.debug("Volume {} uploaded successfully", tmpVolume); } break; case IN_PROGRESS: @@ -346,7 +348,7 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto tmpVolumeDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR); tmpVolumeDataStore.setState(State.Failed); stateMachine.transitTo(tmpVolume, Event.OperationFailed, null, _volumeDao); - msg = "Volume " + tmpVolume.getUuid() + " failed to upload due to operation timed out"; + msg = String.format("Volume %s failed to upload due to operation timed out", tmpVolume); logger.error(msg); sendAlert = true; } else { @@ -358,7 +360,7 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto 
tmpVolumeDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR); tmpVolumeDataStore.setState(State.Failed); stateMachine.transitTo(tmpVolume, Event.OperationFailed, null, _volumeDao); - msg = "Volume " + tmpVolume.getUuid() + " failed to upload. Error details: " + answer.getDetails(); + msg = String.format("Volume %s failed to upload. Error details: %s", tmpVolume, answer.getDetails()); logger.error(msg); sendAlert = true; break; @@ -368,7 +370,7 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto tmpVolumeDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.ABANDONED); tmpVolumeDataStore.setState(State.Failed); stateMachine.transitTo(tmpVolume, Event.OperationTimeout, null, _volumeDao); - msg = "Volume " + tmpVolume.getUuid() + " failed to upload due to operation timed out"; + msg = String.format("Volume %s failed to upload due to operation timed out", tmpVolume); logger.error(msg); sendAlert = true; } @@ -415,7 +417,7 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto logger.debug("Received OVF information from the uploaded template"); boolean persistDeployAsIs = deployAsIsHelper.persistTemplateOVFInformationAndUpdateGuestOS(tmpTemplate.getId(), ovfInformationTO, tmpTemplateDataStore); if (!persistDeployAsIs) { - logger.info("Failed persisting deploy-as-is template details for template " + template.getName()); + logger.info("Failed persisting deploy-as-is template details for template {}", template); break; } } @@ -428,7 +430,7 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto tmpTemplateDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.ABANDONED); tmpTemplateDataStore.setState(State.Failed); stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao); - msg = "Multi-disk OVA template " + tmpTemplate.getUuid() + " failed to process data disks"; + msg = 
String.format("Multi-disk OVA template %s failed to process data disks", tmpTemplate); logger.error(msg); sendAlert = true; break; @@ -447,7 +449,7 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto tmpTemplateDataStore.getPhysicalSize(), tmpTemplateDataStore.getSize(), VirtualMachineTemplate.class.getName(), tmpTemplate.getUuid()); if (logger.isDebugEnabled()) { - logger.debug("Template " + tmpTemplate.getUuid() + " uploaded successfully"); + logger.debug("Template {} uploaded successfully", tmpTemplate); } break; case IN_PROGRESS: @@ -460,7 +462,7 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto tmpTemplateDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR); tmpTemplateDataStore.setState(State.Failed); stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao); - msg = "Template " + tmpTemplate.getUuid() + " failed to upload due to operation timed out"; + msg = String.format("Template %s failed to upload due to operation timed out", tmpTemplate); logger.error(msg); sendAlert = true; } else { @@ -472,7 +474,7 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto tmpTemplateDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR); tmpTemplateDataStore.setState(State.Failed); stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao); - msg = "Template " + tmpTemplate.getUuid() + " failed to upload. Error details: " + answer.getDetails(); + msg = String.format("Template %s failed to upload. 
Error details: %s", tmpTemplate, answer.getDetails()); logger.error(msg); sendAlert = true; break; @@ -482,7 +484,7 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto tmpTemplateDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.ABANDONED); tmpTemplateDataStore.setState(State.Failed); stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationTimeout, null, _templateDao); - msg = "Template " + tmpTemplate.getUuid() + " failed to upload due to operation timed out"; + msg = String.format("Template %s failed to upload due to operation timed out", tmpTemplate); logger.error(msg); sendAlert = true; } diff --git a/server/src/main/java/com/cloud/storage/OCFS2ManagerImpl.java b/server/src/main/java/com/cloud/storage/OCFS2ManagerImpl.java index bbd2a506e4c..0bc586c28de 100644 --- a/server/src/main/java/com/cloud/storage/OCFS2ManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/OCFS2ManagerImpl.java @@ -88,12 +88,12 @@ public class OCFS2ManagerImpl extends ManagerBase implements OCFS2Manager, Resou private List> marshalNodes(List hosts) { Integer i = 0; List> lst = new ArrayList>(); - for (HostVO h : hosts) { + for (HostVO host : hosts) { /** * Don't show "node" in node name otherwise OVM's utils/config_o2cb.sh will be going crazy */ - String nodeName = "ovm_" + h.getPrivateIpAddress().replace(".", "_"); - Ternary node = new Ternary(i, h.getPrivateIpAddress(), nodeName); + String nodeName = "ovm_" + host.getPrivateIpAddress().replace(".", "_"); + Ternary node = new Ternary(i, host.getPrivateIpAddress(), nodeName); lst.add(node); i++; } @@ -102,14 +102,14 @@ public class OCFS2ManagerImpl extends ManagerBase implements OCFS2Manager, Resou private boolean prepareNodes(String clusterName, List hosts) { PrepareOCFS2NodesCommand cmd = new PrepareOCFS2NodesCommand(clusterName, marshalNodes(hosts)); - for (HostVO h : hosts) { - Answer ans = _agentMgr.easySend(h.getId(), cmd); + for (HostVO host : hosts) { + 
Answer ans = _agentMgr.easySend(host.getId(), cmd); if (ans == null) { - logger.debug("Host " + h.getId() + " is not in UP state, skip preparing OCFS2 node on it"); + logger.debug("Host {} is not in UP state, skip preparing OCFS2 node on it", host); continue; } if (!ans.getResult()) { - logger.warn("PrepareOCFS2NodesCommand failed on host " + h.getId() + " " + ans.getDetails()); + logger.warn("PrepareOCFS2NodesCommand failed on host {} {}", host, ans.getDetails()); return false; } } @@ -150,7 +150,7 @@ public class OCFS2ManagerImpl extends ManagerBase implements OCFS2Manager, Resou sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing); List hosts = sc.list(); if (hosts.isEmpty()) { - logger.debug("There is no host in cluster " + clusterId + ", no need to prepare OCFS2 nodes"); + logger.debug("There is no host in cluster {}, no need to prepare OCFS2 nodes", cluster); return true; } @@ -178,8 +178,8 @@ public class OCFS2ManagerImpl extends ManagerBase implements OCFS2Manager, Resou @Override public void processDeletHostEventAfter(Host host) { String errMsg = - String.format("Prepare OCFS2 nodes failed after delete host %1$s (zone:%2$s, pod:%3$s, cluster:%4$s", host.getId(), host.getDataCenterId(), host.getPodId(), - host.getClusterId()); + String.format("Prepare OCFS2 nodes failed after delete host %s (zone: %s, pod: %s, cluster: %s", + host, host.getDataCenterId(), host.getPodId(), host.getClusterId()); if (host.getHypervisorType() != HypervisorType.Ovm) { return; diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index f2e03cddb7c..7b14cae151e 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -436,7 +436,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // available for (VolumeVO vol : vols) { if (vol.getRemoved() != null) { - 
logger.warn("Volume id:" + vol.getId() + " is removed, cannot share on this instance"); + logger.warn("Volume: {} is removed, cannot share on this instance: {}", vol, vm); // not ok to share return false; } @@ -988,7 +988,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // Check if zone is disabled Account account = CallContext.current().getCallingAccount(); if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(account.getId())) { - throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId); + throw new PermissionDeniedException(String.format("Cannot perform this operation, Zone is currently disabled: %s", zone)); } Map params = new HashMap<>(); @@ -1137,7 +1137,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C throw new IllegalArgumentException(String.format("Unable to find storage pool with ID: %d", id)); } if (!primaryStorage.getStatus().equals(StoragePoolStatus.Up)) { - throw new InvalidParameterValueException("Primary storage with id " + primaryStorage.getId() + " cannot be disabled. Storage pool state : " + primaryStorage.getStatus().toString()); + throw new InvalidParameterValueException(String.format("Primary storage %s cannot be disabled. Storage pool state : %s", primaryStorage, primaryStorage.getStatus().toString())); } DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName()); @@ -1156,7 +1156,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C throw new IllegalArgumentException(String.format("Unable to find storage pool with ID: %d", id)); } if (!primaryStorage.getStatus().equals(StoragePoolStatus.Disabled)) { - throw new InvalidParameterValueException("Primary storage with id " + primaryStorage.getId() + " cannot be enabled. 
Storage pool state : " + primaryStorage.getStatus().toString()); + throw new InvalidParameterValueException(String.format("Primary storage %s cannot be enabled. Storage pool state : %s", primaryStorage, primaryStorage.getStatus())); } DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName()); @@ -1382,7 +1382,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C final Answer answer = _agentMgr.easySend(hostId, cmd); if (answer == null || !answer.getResult()) { - String errMsg = "Error interacting with host (related to DeleteStoragePoolCommand)" + (StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : ""); + String errMsg = "Error interacting with host (related to DeleteStoragePoolCommand)" + (answer == null ? "" : (StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : "")); logger.error(errMsg); @@ -1402,8 +1402,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C throw new InvalidParameterValueException("Unable to find pool by id " + id); } if (sPool.getStatus() != StoragePoolStatus.Maintenance) { - logger.warn("Unable to delete storage id: " + id + " due to it is not in Maintenance state"); - throw new InvalidParameterValueException("Unable to delete storage due to it is not in Maintenance state, id: " + id); + logger.warn("Unable to delete storage pool: {} due to it is not in Maintenance state", sPool); + throw new InvalidParameterValueException(String.format("Unable to delete storage due to it is not in Maintenance state, pool: %s", sPool)); } if (sPool.getPoolType() == StoragePoolType.DatastoreCluster) { @@ -1419,8 +1419,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } }); } else { - logger.debug("Cannot delete storage pool {} as the following non-destroyed volumes are on it: {}.", sPool::getName, () -> getStoragePoolNonDestroyedVolumesLog(sPool.getId())); - 
throw new CloudRuntimeException(String.format("Cannot delete pool %s as there are non-destroyed volumes associated to this pool.", sPool.getName())); + logger.debug("Cannot delete storage pool {} as the following non-destroyed volumes are on it: {}.", sPool::toString, () -> getStoragePoolNonDestroyedVolumesLog(sPool.getId())); + throw new CloudRuntimeException(String.format("Cannot delete pool %s as there are associated non-destroyed vols for this pool", sPool)); } } return deleteDataStoreInternal(sPool, forced); @@ -1481,8 +1481,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C if (vlms.first() > 0) { Pair nonDstrdVlms = volumeDao.getNonDestroyedCountAndTotalByPool(sPool.getId()); if (nonDstrdVlms.first() > 0) { - logger.debug("Cannot delete storage pool {} as the following non-destroyed volumes are on it: {}.", sPool::getName, () -> getStoragePoolNonDestroyedVolumesLog(sPool.getId())); - throw new CloudRuntimeException(String.format("Cannot delete pool %s as there are non-destroyed volumes associated to this pool.", sPool.getName())); + logger.debug("Cannot delete storage pool {} as the following non-destroyed volumes are on it: {}.", sPool::toString, () -> getStoragePoolNonDestroyedVolumesLog(sPool.getId())); + throw new CloudRuntimeException(String.format("Cannot delete pool %s as there are non-destroyed volumes associated to this pool.", sPool)); } // force expunge non-destroyed volumes List vols = volumeDao.listVolumesToBeDestroyed(); @@ -1491,7 +1491,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C try { future.get(); } catch (InterruptedException | ExecutionException e) { - logger.debug("expunge volume failed:" + vol.getId(), e); + logger.debug("expunge volume failed: {}", vol, e); } } } @@ -1499,8 +1499,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // Check if the pool has associated volumes in the volumes table // If it does , then you cannot 
delete the pool if (vlms.first() > 0) { - logger.debug("Cannot delete storage pool {} as the following non-destroyed volumes are on it: {}.", sPool::getName, () -> getStoragePoolNonDestroyedVolumesLog(sPool.getId())); - throw new CloudRuntimeException(String.format("Cannot delete pool %s as there are non-destroyed volumes associated to this pool.", sPool.getName())); + logger.debug("Cannot delete storage pool {} as the following non-destroyed volumes are on it: {}.", sPool::toString, () -> getStoragePoolNonDestroyedVolumesLog(sPool.getId())); + throw new CloudRuntimeException(String.format("Cannot delete pool %s as there are non-destroyed volumes associated to this pool.", sPool)); } } @@ -1509,13 +1509,13 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C if (lock == null) { if (logger.isDebugEnabled()) { - logger.debug("Failed to acquire lock when deleting PrimaryDataStoreVO with ID: " + sPool.getId()); + logger.debug("Failed to acquire lock when deleting PrimaryDataStoreVO: {}", sPool); } return false; } _storagePoolDao.releaseFromLockTable(lock.getId()); - logger.trace("Released lock for storage pool " + sPool.getId()); + logger.trace("Released lock for storage pool {}", sPool); DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(sPool.getStorageProviderName()); DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle(); @@ -1541,25 +1541,24 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } @Override - public boolean connectHostToSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException { + public boolean connectHostToSharedPool(Host host, long poolId) throws StorageUnavailableException, StorageConflictException { StoragePool pool = (StoragePool)_dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); assert (pool.isShared()) : "Now, did you actually read the name of this method?"; - logger.debug("Adding pool " + 
pool.getName() + " to host " + hostId); + logger.debug("Adding pool {} to host {}", pool, host); DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); HypervisorHostListener listener = hostListeners.get(provider.getName()); - return listener.hostConnect(hostId, pool.getId()); + return listener.hostConnect(host, pool); } @Override - public void disconnectHostFromSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException { - StoragePool pool = (StoragePool)_dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); + public void disconnectHostFromSharedPool(Host host, StoragePool pool) throws StorageUnavailableException, StorageConflictException { assert (pool.isShared()) : "Now, did you actually read the name of this method?"; - logger.debug("Removing pool " + pool.getName() + " from host " + hostId); + logger.debug("Removing pool {} from host {}", pool, host); DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); HypervisorHostListener listener = hostListeners.get(provider.getName()); - listener.hostDisconnected(hostId, pool.getId()); + listener.hostDisconnected(host, pool); } @Override @@ -1601,14 +1600,14 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // All this is for the inaccuracy of floats for big number multiplication. 
BigDecimal overProvFactor = getStorageOverProvisioningFactor(storagePool.getId()); totalOverProvCapacity = overProvFactor.multiply(new BigDecimal(storagePool.getCapacityBytes())).longValue(); - logger.debug("Found storage pool " + storagePool.getName() + " of type " + storagePool.getPoolType().toString() + " with overprovisioning factor " + overProvFactor.toString()); - logger.debug("Total over provisioned capacity calculated is " + overProvFactor + " * " + toHumanReadableSize(storagePool.getCapacityBytes())); + logger.debug("Found storage pool {} of type {} with overprovisioning factor {}", storagePool, storagePool.getPoolType(), overProvFactor); + logger.debug("Total over provisioned capacity calculated is {} * {}", overProvFactor, toHumanReadableSize(storagePool.getCapacityBytes())); } else { - logger.debug("Found storage pool " + storagePool.getName() + " of type " + storagePool.getPoolType().toString()); + logger.debug("Found storage pool {} of type {}", storagePool, storagePool.getPoolType()); totalOverProvCapacity = storagePool.getCapacityBytes(); } - logger.debug("Total over provisioned capacity of the pool " + storagePool.getName() + " id: " + storagePool.getId() + " is " + toHumanReadableSize(totalOverProvCapacity)); + logger.debug("Total over provisioned capacity of the pool {} is {}", storagePool, toHumanReadableSize(totalOverProvCapacity)); CapacityState capacityState = CapacityState.Enabled; if (storagePool.getScope() == ScopeType.ZONE) { DataCenterVO dc = ApiDBUtils.findZoneById(storagePool.getDataCenterId()); @@ -1650,8 +1649,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C _capacityDao.update(capacity.getId(), capacity); } } - logger.debug("Successfully set Capacity - " + toHumanReadableSize(totalOverProvCapacity) + " for capacity type - " + capacityType + " , DataCenterId - " + storagePool.getDataCenterId() + ", HostOrPoolId - " - + storagePool.getId() + ", PodId " + storagePool.getPodId()); + 
logger.debug("Successfully set Capacity - {} for capacity type - {} , DataCenterId - {}, Pool - {}, PodId {}", + toHumanReadableSize(totalOverProvCapacity), capacityType, storagePool.getDataCenterId(), storagePool, storagePool.getPodId()); } @Override @@ -1679,7 +1678,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C hostIds.removeAll(hostIdsToAvoid); } if (hostIds == null || hostIds.isEmpty()) { - throw new StorageUnavailableException("Unable to send command to the pool " + pool.getId() + " due to there is no enabled hosts up in this cluster", pool.getId()); + throw new StorageUnavailableException(String.format("Unable to send command to the pool %s due to there is no enabled hosts up in this cluster", pool), pool.getId()); } for (Long hostId : hostIds) { try { @@ -1690,10 +1689,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C answers.add(_agentMgr.send(targetHostId, cmd)); } return new Pair<>(hostId, answers.toArray(new Answer[answers.size()])); - } catch (AgentUnavailableException e) { - logger.debug("Unable to send storage pool command to " + pool + " via " + hostId, e); - } catch (OperationTimedoutException e) { - logger.debug("Unable to send storage pool command to " + pool + " via " + hostId, e); + } catch (AgentUnavailableException | OperationTimedoutException e) { + logger.debug("Unable to send storage pool command to {} via {}", pool::toString, () -> _hostDao.findById(hostId), () -> e); } } @@ -1729,19 +1726,21 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C try { List unusedTemplatesInPool = _tmpltMgr.getUnusedTemplatesInPool(pool); - logger.debug(String.format("Storage pool garbage collector found [%s] templates to be cleaned up in storage pool [%s].", unusedTemplatesInPool.size(), pool.getName())); + logger.debug("Storage pool garbage collector found [{}] templates to be cleaned up in storage pool [{}].", unusedTemplatesInPool.size(), pool); for 
(VMTemplateStoragePoolVO templatePoolVO : unusedTemplatesInPool) { if (templatePoolVO.getDownloadState() != VMTemplateStorageResourceAssoc.Status.DOWNLOADED) { - logger.debug(String.format("Storage pool garbage collector is skipping template [%s] clean up on pool [%s] " + - "because it is not completely downloaded.", templatePoolVO.getTemplateId(), templatePoolVO.getPoolId())); + logger.debug("Storage pool garbage collector is skipping " + + "template: {} on pool {} because it is not completely downloaded.", + () -> _templateDao.findById(templatePoolVO.getTemplateId()), () -> _storagePoolDao.findById(templatePoolVO.getPoolId())); continue; } if (!templatePoolVO.getMarkedForGC()) { templatePoolVO.setMarkedForGC(true); _vmTemplatePoolDao.update(templatePoolVO.getId(), templatePoolVO); - logger.debug(String.format("Storage pool garbage collector has marked template [%s] on pool [%s] " + - "for garbage collection.", templatePoolVO.getTemplateId(), templatePoolVO.getPoolId())); + logger.debug("Storage pool garbage collector has marked template [{}] on pool [{}] " + + "for garbage collection.", + () -> _templateDao.findById(templatePoolVO.getTemplateId()), () -> _storagePoolDao.findById(templatePoolVO.getPoolId())); continue; } @@ -1783,9 +1782,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C logger.debug(String.format("Did not find snapshot [%s] in destroying state in %s data store ID: %d.", snapshotUuid, storeRole, snapshotDataStoreVO.getDataStoreId())); } } catch (Exception e) { - logger.error(String.format("Failed to delete snapshot [%s] from storage due to: [%s].", snapshotDataStoreVO.getSnapshotId(), e.getMessage())); + logger.error("Failed to delete snapshot [{}] from storage due to: [{}].", snapshot, e.getMessage()); if (logger.isDebugEnabled()) { - logger.debug(String.format("Failed to delete snapshot [%s] from storage.", snapshotUuid), e); + logger.debug("Failed to delete snapshot [{}] from storage.", snapshot, e); } } } @@ 
-1796,8 +1795,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C if (Type.ROOT.equals(vol.getVolumeType())) { VMInstanceVO vmInstanceVO = _vmInstanceDao.findById(vol.getInstanceId()); if (vmInstanceVO != null && vmInstanceVO.getState() == State.Destroyed) { - logger.debug(String.format("ROOT volume [%s] will not be expunged because the VM is [%s], therefore this volume will be expunged with the VM" - + " cleanup job.", vol.getUuid(), vmInstanceVO.getState())); + logger.debug("ROOT volume [{}] will not be expunged because the VM is [{}], therefore this volume will be expunged with the VM" + + " cleanup job.", vol, vmInstanceVO.getState()); continue; } } @@ -1810,8 +1809,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // system, but not necessary. handleManagedStorage(vol); } catch (Exception e) { - logger.error(String.format("Unable to destroy host-side clustered file system [%s] due to: [%s].", vol.getUuid(), e.getMessage())); - logger.debug(String.format("Unable to destroy host-side clustered file system [%s].", vol.getUuid()), e); + logger.error("Unable to destroy host-side clustered file system [{}] due to: [{}].", vol, e.getMessage()); + logger.debug("Unable to destroy host-side clustered file system [{}].", vol, e); } try { @@ -1820,11 +1819,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C volService.ensureVolumeIsExpungeReady(vol.getId()); volService.expungeVolumeAsync(volumeInfo); } else { - logger.debug(String.format("Volume [%s] is already destroyed.", vol.getUuid())); + logger.debug("Volume [{}] is already destroyed.", vol); } } catch (Exception e) { - logger.error(String.format("Unable to destroy volume [%s] due to: [%s].", vol.getUuid(), e.getMessage())); - logger.debug(String.format("Unable to destroy volume [%s].", vol.getUuid()), e); + logger.error("Unable to destroy volume [{}] due to: [{}].", vol, e.getMessage()); + logger.debug("Unable to 
destroy volume [{}].", vol, e); } } @@ -1838,8 +1837,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } _snapshotDao.expunge(snapshotVO.getId()); } catch (Exception e) { - logger.error(String.format("Unable to destroy snapshot [%s] due to: [%s].", snapshotVO.getUuid(), e.getMessage())); - logger.debug(String.format("Unable to destroy snapshot [%s].", snapshotVO.getUuid()), e); + logger.error("Unable to destroy snapshot [{}] due to: [{}].", snapshotVO, e.getMessage()); + logger.debug("Unable to destroy snapshot [{}].", snapshotVO, e); } } @@ -1855,7 +1854,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C DataStore dataStore = _dataStoreMgr.getDataStore(volumeDataStore.getDataStoreId(), DataStoreRole.Image); EndPoint ep = _epSelector.select(dataStore, volumeDataStore.getExtractUrl()); if (ep == null) { - logger.warn(String.format("There is no secondary storage VM for image store [%s], cannot destroy uploaded volume [%s].", dataStore.getName(), volume.getUuid())); + logger.warn("There is no secondary storage VM for image store {}, cannot destroy uploaded volume {}.", dataStore, volume); continue; } Host host = _hostDao.findById(ep.getId()); @@ -1868,18 +1867,18 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // expunge volume from secondary if volume is on image store VolumeInfo volOnSecondary = volFactory.getVolume(volume.getId(), DataStoreRole.Image); if (volOnSecondary != null) { - logger.info(String.format("Expunging volume [%s] uploaded using HTTP POST from secondary data store.", volume.getUuid())); + logger.info("Expunging volume [{}] uploaded using HTTP POST from secondary data store.", volume); AsyncCallFuture future = volService.expungeVolumeAsync(volOnSecondary); VolumeApiResult result = future.get(); if (!result.isSuccess()) { - logger.warn(String.format("Failed to expunge volume [%s] from the image store [%s] due to: [%s].", volume.getUuid(), 
dataStore.getName(), result.getResult())); + logger.warn("Failed to expunge volume {} from the image store {} due to: {}", volume, dataStore, result.getResult()); } } } } } catch (Throwable th) { - logger.error(String.format("Unable to destroy uploaded volume [%s] due to: [%s].", volume.getUuid(), th.getMessage())); - logger.debug(String.format("Unable to destroy uploaded volume [%s].", volume.getUuid()), th); + logger.error("Unable to destroy uploaded volume [{}] due to: [{}].", volume, th.getMessage()); + logger.debug("Unable to destroy uploaded volume [{}].", volume, th); } } @@ -1895,7 +1894,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C DataStore dataStore = _dataStoreMgr.getDataStore(templateDataStore.getDataStoreId(), DataStoreRole.Image); EndPoint ep = _epSelector.select(dataStore, templateDataStore.getExtractUrl()); if (ep == null) { - logger.warn(String.format("Cannot destroy uploaded template [%s] as there is no secondary storage VM for image store [%s].", template.getUuid(), dataStore.getName())); + logger.warn("Cannot destroy uploaded template {} as there is no secondary storage VM for image store {}.", template, dataStore); continue; } Host host = _hostDao.findById(ep.getId()); @@ -1904,7 +1903,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C AsyncCallFuture future = _imageSrv.deleteTemplateAsync(tmplFactory.getTemplate(template.getId(), dataStore)); TemplateApiResult result = future.get(); if (!result.isSuccess()) { - logger.warn(String.format("Failed to delete template [%s] from image store [%s] due to: [%s]", template.getUuid(), dataStore.getName(), result.getResult())); + logger.warn("Failed to delete template {} from the image store {} due to: {}", template, dataStore, result.getResult()); continue; } // remove from template_zone_ref @@ -1928,8 +1927,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } } catch (Throwable th) { - 
logger.error(String.format("Unable to destroy uploaded template [%s] due to: [%s].", template.getUuid(), th.getMessage())); - logger.debug(String.format("Unable to destroy uploaded template [%s].", template.getUuid()), th); + logger.error("Unable to destroy uploaded template [{}] due to: [{}].", template, th.getMessage()); + logger.debug("Unable to destroy uploaded template [{}].", template, th); } } cleanupInactiveTemplates(); @@ -2015,7 +2014,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C if (answer != null && answer.getResult()) { volService.revokeAccess(volumeInfo, host, volumeInfo.getDataStore()); } else { - logger.warn("Unable to remove host-side clustered file system for the following volume: " + volume.getUuid()); + logger.warn("Unable to remove host-side clustered file system for the following volume: {}", volume); } } } @@ -2077,7 +2076,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C try { long storeId = store.getId(); List destroyedTemplateStoreVOs = _templateStoreDao.listDestroyed(storeId); - logger.debug("Secondary storage garbage collector found " + destroyedTemplateStoreVOs.size() + " templates to cleanup on template_store_ref for store: " + store.getName()); + logger.debug("Secondary storage garbage collector found {} templates to cleanup on template_store_ref for store: {}", destroyedTemplateStoreVOs.size(), store); for (TemplateDataStoreVO destroyedTemplateStoreVO : destroyedTemplateStoreVOs) { if (logger.isDebugEnabled()) { logger.debug("Deleting template store DB entry: " + destroyedTemplateStoreVO); @@ -2085,7 +2084,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C _templateStoreDao.remove(destroyedTemplateStoreVO.getId()); } } catch (Exception e) { - logger.warn("problem cleaning up templates in template_store_ref for store: " + store.getName(), e); + logger.warn("problem cleaning up templates in template_store_ref for store: {}", 
store, e); } } @@ -2093,7 +2092,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C for (DataStore store : imageStores) { try { List destroyedSnapshotStoreVOs = _snapshotStoreDao.listDestroyed(store.getId()); - logger.debug("Secondary storage garbage collector found " + destroyedSnapshotStoreVOs.size() + " snapshots to cleanup on snapshot_store_ref for store: " + store.getName()); + logger.debug("Secondary storage garbage collector found {} snapshots to cleanup on snapshot_store_ref for store: {}", destroyedSnapshotStoreVOs.size(), store); for (SnapshotDataStoreVO destroyedSnapshotStoreVO : destroyedSnapshotStoreVOs) { // check if this snapshot has child SnapshotInfo snap = snapshotFactory.getSnapshot(destroyedSnapshotStoreVO.getSnapshotId(), store); @@ -2121,7 +2120,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } catch (Exception e2) { - logger.warn("problem cleaning up snapshots in snapshot_store_ref for store: " + store.getName(), e2); + logger.warn("problem cleaning up snapshots in snapshot_store_ref for store: {}", store, e2); } } @@ -2131,7 +2130,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C try { List destroyedStoreVOs = _volumeStoreDao.listDestroyed(store.getId()); destroyedStoreVOs.addAll(_volumeDataStoreDao.listByVolumeState(Volume.State.Expunged)); - logger.debug("Secondary storage garbage collector found " + destroyedStoreVOs.size() + " volumes to cleanup on volume_store_ref for store: " + store.getName()); + logger.debug("Secondary storage garbage collector found {} volumes to cleanup on volume_store_ref for store: {}", destroyedStoreVOs.size(), store); for (VolumeDataStoreVO destroyedStoreVO : destroyedStoreVOs) { if (logger.isDebugEnabled()) { logger.debug("Deleting volume store DB entry: " + destroyedStoreVO); @@ -2140,7 +2139,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } catch (Exception e2) 
{ - logger.warn("problem cleaning up volumes in volume_store_ref for store: " + store.getName(), e2); + logger.warn("problem cleaning up volumes in volume_store_ref for store: {}", store, e2); } } } catch (Exception e3) { @@ -2175,7 +2174,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } if (!primaryStorage.getStatus().equals(StoragePoolStatus.Up) && !primaryStorage.getStatus().equals(StoragePoolStatus.ErrorInMaintenance)) { - throw new InvalidParameterValueException("Primary storage with id " + primaryStorageId + " is not ready for migration, as the status is:" + primaryStorage.getStatus().toString()); + throw new InvalidParameterValueException(String.format("Primary storage %s is not ready for migration, as the status is:%s", primaryStorage, primaryStorage.getStatus().toString())); } DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName()); @@ -2184,7 +2183,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C if (primaryStorage.getPoolType() == StoragePoolType.DatastoreCluster) { if (primaryStorage.getStatus() == StoragePoolStatus.PrepareForMaintenance) { - throw new CloudRuntimeException(String.format("There is already a job running for preparation for maintenance of the storage pool %s", primaryStorage.getUuid())); + throw new CloudRuntimeException(String.format("There is already a job running for preparation for maintenance of the storage pool %s", primaryStorage)); } handlePrepareDatastoreClusterMaintenance(lifeCycle, primaryStorageId); } @@ -2216,7 +2215,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C lifeCycle.maintain(childStore); } catch (Exception e) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Exception on maintenance preparation of one of the child datastores in datastore cluster %d with error %s", primaryStorageId, e)); + logger.debug("Exception on maintenance preparation 
of one of the child datastores in datastore cluster {} with error {}", datastoreCluster, e); } // Set to ErrorInMaintenance state of all child storage pools and datastore cluster for (StoragePoolVO childDatastore : childDatastores) { @@ -2225,7 +2224,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } datastoreCluster.setStatus(StoragePoolStatus.ErrorInMaintenance); _storagePoolDao.update(datastoreCluster.getId(), datastoreCluster); - throw new CloudRuntimeException(String.format("Failed to prepare maintenance mode for datastore cluster %d with error %s %s", primaryStorageId, e.getMessage(), e)); + throw new CloudRuntimeException(String.format("Failed to prepare maintenance mode for datastore cluster %s with error %s %s", datastoreCluster, e.getMessage(), e)); } } } @@ -2247,7 +2246,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } if (primaryStorage.getStatus().equals(StoragePoolStatus.Up) || primaryStorage.getStatus().equals(StoragePoolStatus.PrepareForMaintenance)) { - throw new StorageUnavailableException("Primary storage with id " + primaryStorageId + " is not ready to complete migration, as the status is:" + primaryStorage.getStatus().toString(), + throw new StorageUnavailableException("Primary storage " + primaryStorage + " is not ready to complete migration, as the status is:" + primaryStorage.getStatus().toString(), primaryStorageId); } @@ -2276,7 +2275,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C StoragePoolVO pool = _storagePoolDao.findById(poolId); if (pool == null) { - String msg = String.format("Unable to obtain lock on the storage pool record while syncing storage pool [%s] with management server", pool.getUuid()); + String msg = String.format("Unable to find the storage pool with id %d record while syncing storage pool with management server", poolId); logger.error(msg); throw new InvalidParameterValueException(msg); } @@ -2286,7 
+2285,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } if (!pool.getStatus().equals(StoragePoolStatus.Up)) { - throw new InvalidParameterValueException(String.format("Primary storage with id %s is not ready for syncing, as the status is %s", pool.getUuid(), pool.getStatus().toString())); + throw new InvalidParameterValueException(String.format("Primary storage %s is not ready for syncing, as the status is %s", pool, pool.getStatus().toString())); } // find the host @@ -2299,11 +2298,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C final Answer answer = _agentMgr.easySend(hostId, modifyStoragePoolCommand); if (answer == null) { - throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command %s", pool.getUuid())); + throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command %s", pool)); } if (!answer.getResult()) { - throw new CloudRuntimeException(String.format("Unable to process ModifyStoragePoolCommand for pool %s on the host %s due to %s", pool.getUuid(), hostId, answer.getDetails())); + throw new CloudRuntimeException(String.format("Unable to process ModifyStoragePoolCommand for pool %s on the host %s due to %s", pool, _hostDao.findById(hostId), answer.getDetails())); } assert (answer instanceof ModifyStoragePoolAnswer) : "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? 
Pool=" + @@ -2322,7 +2321,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } else { - throw new CloudRuntimeException(String.format("Unable to sync storage pool [%s] as there no connected hosts to the storage pool", pool.getUuid())); + throw new CloudRuntimeException(String.format("Unable to sync storage pool [%s] as there no connected hosts to the storage pool", pool)); } return (PrimaryDataStoreInfo) _dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); } @@ -2469,8 +2468,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } if (dataStoreVO != null && !dataStoreVO.getStatus().equals(StoragePoolStatus.Up)) { - String msg = String.format("Cannot synchronise datastore cluster %s because primary storage with id %s is not in Up state, " + - "current state is %s", datastoreClusterPool.getUuid(), dataStoreVO.getUuid(), dataStoreVO.getStatus().toString()); + String msg = String.format("Cannot synchronise datastore cluster %s because primary storage %s is not in Up state, " + + "current state is %s", datastoreClusterPool, dataStoreVO, dataStoreVO.getStatus().toString()); throw new CloudRuntimeException(msg); } } @@ -2542,18 +2541,17 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C details.put(DiskTO.PROTOCOL_TYPE, Storage.StoragePoolType.DatastoreCluster.toString()); disk.setDetails(details); - logger.debug(String.format("Attempting to process SyncVolumePathCommand for the volume %d on the host %d with state %s", volumeId, hostId, hostVO.getResourceState())); + logger.debug("Attempting to process SyncVolumePathCommand for the volume {} on the host {} with state {}", volume, hostVO, hostVO.getResourceState()); SyncVolumePathCommand cmd = new SyncVolumePathCommand(disk); final Answer answer = _agentMgr.easySend(hostId, cmd); // validate answer if (answer == null) { - throw new CloudRuntimeException("Unable to get an answer to the SyncVolumePath command 
for volume " + volumeId); + throw new CloudRuntimeException(String.format("Unable to get an answer to the SyncVolumePath command for volume %s", volume)); } if (!answer.getResult()) { - throw new CloudRuntimeException("Unable to process SyncVolumePathCommand for the volume" + volumeId + " to the host " + hostId + " due to " + answer.getDetails()); + throw new CloudRuntimeException(String.format("Unable to process SyncVolumePathCommand for the volume %s to the host %s due to %s", volume, hostVO, answer.getDetails())); } - assert (answer instanceof SyncVolumePathAnswer) : "Well, now why won't you actually return the SyncVolumePathAnswer when it's SyncVolumePathCommand? volume=" + - volume.getUuid() + "Host=" + hostId; + assert (answer instanceof SyncVolumePathAnswer) : String.format("Well, now why won't you actually return the SyncVolumePathAnswer when it's SyncVolumePathCommand? volume=%s Host=%s", volume, hostVO); // check for the changed details of volume and update database VolumeVO volumeVO = volumeDao.findById(volumeId); @@ -2563,7 +2561,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C if (storagePoolVO != null) { volumeVO.setPoolId(storagePoolVO.getId()); } else { - logger.warn(String.format("Unable to find datastore %s while updating the new datastore of the volume %d", datastoreName, volumeId)); + logger.warn("Unable to find datastore {} while updating the new datastore of the volume {}", datastoreName, volumeVO); } } @@ -2627,7 +2625,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C public void onManagementNodeLeft(List nodeList, long selfNodeId) { for (ManagementServerHost vo : nodeList) { if (vo.getMsid() == _serverId) { - logger.info("Cleaning up storage maintenance jobs associated with Management server: " + vo.getMsid()); + logger.info("Cleaning up storage maintenance jobs associated with Management server: {}", vo); List poolIds = 
_storagePoolWorkDao.searchForPoolIdsForPendingWorkJobs(vo.getMsid()); if (poolIds.size() > 0) { for (Long poolId : poolIds) { @@ -2840,7 +2838,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } if (secHost.getType() != Host.Type.SecondaryStorage) { - throw new InvalidParameterValueException("host: " + secStorageId + " is not a secondary storage"); + throw new InvalidParameterValueException(String.format("host: %s is not a secondary storage", secHost)); } URI uri = null; @@ -2916,13 +2914,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C double usedPercentage = ((double)usedSize / (double)totalSize); double storageUsedThreshold = CapacityManager.StorageCapacityDisableThreshold.valueIn(pool.getDataCenterId()); if (logger.isDebugEnabled()) { - logger.debug("Checking pool " + pool.getId() + " for storage, totalSize: " + pool.getCapacityBytes() + ", usedBytes: " + pool.getUsedBytes() + - ", usedPct: " + usedPercentage + ", disable threshold: " + storageUsedThreshold); + logger.debug("Checking pool {} for storage, totalSize: {}, usedBytes: {}, usedPct: {}, disable threshold: {}", pool, pool.getCapacityBytes(), pool.getUsedBytes(), usedPercentage, storageUsedThreshold); } if (usedPercentage >= storageUsedThreshold) { if (logger.isDebugEnabled()) { - logger.debug("Insufficient space on pool: " + pool.getId() + " since its usage percentage: " + usedPercentage + - " has crossed the pool.storage.capacity.disablethreshold: " + storageUsedThreshold); + logger.debug("Insufficient space on pool: {} since its usage percentage: {} has crossed the pool.storage.capacity.disablethreshold: {}", pool, usedPercentage, storageUsedThreshold); } return false; } @@ -2952,7 +2948,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // Only IOPS-guaranteed primary storage like SolidFire is using/setting IOPS. // This check returns true for storage that does not specify IOPS. 
if (pool.getCapacityIops() == null) { - logger.info("Storage pool " + pool.getName() + " (" + pool.getId() + ") does not supply IOPS capacity, assuming enough capacity"); + logger.info("Storage pool {} does not supply IOPS capacity, assuming enough capacity", pool); return true; } @@ -3037,7 +3033,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // allocated space includes templates if (logger.isDebugEnabled()) { - logger.debug("Destination pool id: " + pool.getId()); + logger.debug("Destination pool: {}", pool); } // allocated space includes templates final StoragePoolVO poolVO = _storagePoolDao.findById(pool.getId()); @@ -3071,7 +3067,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } if (logger.isDebugEnabled()) { - logger.debug("Pool ID for the volume with ID " + volumeVO.getId() + " is " + volumeVO.getPoolId()); + logger.debug("Pool ID for the volume {} is {}", volumeVO, volumeVO.getPoolId()); } // A ready-state volume is already allocated in a pool, so the asking size is zero for it. 
@@ -3092,7 +3088,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return false; } if (logger.isDebugEnabled()) { - logger.debug("Destination pool id: " + pool.getId()); + logger.debug("Destination pool: {}", pool); } long totalAskingSize = newSize - currentSize; @@ -3156,7 +3152,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // check cummilative result for all volumes for (Pair answer : answers) { if (!answer.second().getResult()) { - logger.debug(String.format("Storage pool %s is not compliance with storage policy for volume %s", pool.getUuid(), answer.first().getName())); + logger.debug("Storage pool {} is not compliance with storage policy for volume {}", pool, answer.first().getName()); return false; } } @@ -3178,29 +3174,32 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C totalOverProvCapacity = overProvFactor.multiply(new BigDecimal(pool.getCapacityBytes())).longValue(); - logger.debug("Found storage pool " + pool.getName() + " of type " + pool.getPoolType().toString() + " with overprovisioning factor " + overProvFactor.toString()); - logger.debug("Total over provisioned capacity calculated is " + overProvFactor + " * " + toHumanReadableSize(pool.getCapacityBytes())); + logger.debug("Found storage pool {} of type {} with overprovisioning factor {}", pool, pool.getPoolType(), overProvFactor); + logger.debug("Total over provisioned capacity calculated is {} * {}", overProvFactor, toHumanReadableSize(pool.getCapacityBytes())); } else { totalOverProvCapacity = pool.getCapacityBytes(); - logger.debug("Found storage pool " + poolVO.getName() + " of type " + pool.getPoolType().toString()); + logger.debug("Found storage pool {} of type {}", poolVO, pool.getPoolType()); } - logger.debug("Total capacity of the pool " + poolVO.getName() + " with ID " + pool.getId() + " is " + toHumanReadableSize(totalOverProvCapacity)); + logger.debug("Total capacity of the 
pool {} is {}", poolVO, toHumanReadableSize(totalOverProvCapacity)); double storageAllocatedThreshold = CapacityManager.StorageAllocatedCapacityDisableThreshold.valueIn(pool.getDataCenterId()); if (logger.isDebugEnabled()) { - logger.debug("Checking pool: " + pool.getId() + " for storage allocation , maxSize : " + toHumanReadableSize(totalOverProvCapacity) + ", totalAllocatedSize : " + toHumanReadableSize(allocatedSizeWithTemplate) - + ", askingSize : " + toHumanReadableSize(totalAskingSize) + ", allocated disable threshold: " + storageAllocatedThreshold); + logger.debug("Checking pool: {} for storage allocation , maxSize : {}, " + + "totalAllocatedSize : {}, askingSize : {}, allocated disable threshold: {}", + pool, toHumanReadableSize(totalOverProvCapacity), toHumanReadableSize(allocatedSizeWithTemplate), toHumanReadableSize(totalAskingSize), storageAllocatedThreshold); } double usedPercentage = (allocatedSizeWithTemplate + totalAskingSize) / (double)(totalOverProvCapacity); if (usedPercentage > storageAllocatedThreshold) { if (logger.isDebugEnabled()) { - logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for storage allocation since its allocated percentage: " + usedPercentage - + " has crossed the allocated pool.storage.allocated.capacity.disablethreshold: " + storageAllocatedThreshold); + logger.debug("Insufficient un-allocated capacity on: {} for storage " + + "allocation since its allocated percentage: {} has crossed the allocated" + + " pool.storage.allocated.capacity.disablethreshold: {}", + pool, usedPercentage, storageAllocatedThreshold); } if (!forVolumeResize) { return false; @@ -3220,8 +3219,10 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C if (totalOverProvCapacity < (allocatedSizeWithTemplate + totalAskingSize)) { if (logger.isDebugEnabled()) { - logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for storage allocation, not enough storage, maxSize : " + 
toHumanReadableSize(totalOverProvCapacity) - + ", totalAllocatedSize : " + toHumanReadableSize(allocatedSizeWithTemplate) + ", askingSize : " + toHumanReadableSize(totalAskingSize)); + logger.debug("Insufficient un-allocated capacity on: {} for storage " + + "allocation, not enough storage, maxSize : {}, totalAllocatedSize : {}, " + + "askingSize : {}", pool, toHumanReadableSize(totalOverProvCapacity), + toHumanReadableSize(allocatedSizeWithTemplate), toHumanReadableSize(totalAskingSize)); } return false; @@ -3911,6 +3912,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C for (VolumeDataStoreVO volumeOnImageStore : volumesOnImageStoreList) { long volumeId = volumeOnImageStore.getVolumeId(); + VolumeVO volume = volumeDao.findById(volumeId); try { long downloadUrlCurrentAgeInSecs = DateUtil.getTimeDifference(DateUtil.now(), volumeOnImageStore.getExtractUrlCreated()); if (downloadUrlCurrentAgeInSecs < _downloadUrlExpirationInterval) { // URL hasnt expired yet @@ -3918,7 +3920,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C continue; } expiredVolumeIds.add(volumeId); - logger.debug("Removing download url " + volumeOnImageStore.getExtractUrl() + " for volume id " + volumeId); + logger.debug("Removing download url {} for volume {}", volumeOnImageStore.getExtractUrl(), volume); // Remove it from image store ImageStoreEntity secStore = (ImageStoreEntity)_dataStoreMgr.getDataStore(volumeOnImageStore.getDataStoreId(), DataStoreRole.Image); @@ -3927,7 +3929,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // Now expunge it from DB since this entry was created only for download purpose _volumeStoreDao.expunge(volumeOnImageStore.getId()); } catch (Throwable th) { - logger.warn("Caught exception while deleting download url " + volumeOnImageStore.getExtractUrl() + " for volume id " + volumeOnImageStore.getVolumeId(), th); + logger.warn("Caught exception while deleting 
download url {} for volume {}", volumeOnImageStore.getExtractUrl(), volume, th); } } for (Long volumeId : expiredVolumeIds) { @@ -3943,14 +3945,14 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // Cleanup expired template URLs List templatesOnImageStoreList = _templateStoreDao.listTemplateDownloadUrls(); for (TemplateDataStoreVO templateOnImageStore : templatesOnImageStoreList) { - + VMTemplateVO template = _templateDao.findById(templateOnImageStore.getTemplateId()); try { long downloadUrlCurrentAgeInSecs = DateUtil.getTimeDifference(DateUtil.now(), templateOnImageStore.getExtractUrlCreated()); if (downloadUrlCurrentAgeInSecs < _downloadUrlExpirationInterval) { // URL hasnt expired yet continue; } - logger.debug("Removing download url " + templateOnImageStore.getExtractUrl() + " for template id " + templateOnImageStore.getTemplateId()); + logger.debug("Removing download url {} for template {}", templateOnImageStore.getExtractUrl(), template); // Remove it from image store ImageStoreEntity secStore = (ImageStoreEntity)_dataStoreMgr.getDataStore(templateOnImageStore.getDataStoreId(), DataStoreRole.Image); @@ -3961,7 +3963,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C templateOnImageStore.setExtractUrlCreated(null); _templateStoreDao.update(templateOnImageStore.getId(), templateOnImageStore); } catch (Throwable th) { - logger.warn("caught exception while deleting download url " + templateOnImageStore.getExtractUrl() + " for template id " + templateOnImageStore.getTemplateId(), th); + logger.warn("caught exception while deleting download url {} for template {}", templateOnImageStore.getExtractUrl(), template, th); } } @@ -3973,7 +3975,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C secStore.deleteExtractUrl(imageStoreObjectDownloadVO.getPath(), imageStoreObjectDownloadVO.getDownloadUrl(), null); 
_imageStoreObjectDownloadDao.expunge(imageStoreObjectDownloadVO.getId()); } catch (Throwable th) { - logger.warn("caught exception while deleting download url " + imageStoreObjectDownloadVO.getDownloadUrl() + " for object id " + imageStoreObjectDownloadVO.getId(), th); + logger.warn("caught exception while deleting download url {} for object {}", imageStoreObjectDownloadVO.getDownloadUrl(), imageStoreObjectDownloadVO, th); } } } @@ -4189,7 +4191,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C _objectStoreDao.remove(storeId); } }); - logger.debug("Successfully deleted object store with Id: "+storeId); + logger.debug("Successfully deleted object store: {}", store); return true; } @@ -4230,7 +4232,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C objectStoreVO.setName(cmd.getName()); } _objectStoreDao.update(id, objectStoreVO); - logger.debug("Successfully updated object store with Id: "+id); + logger.debug("Successfully updated object store: {}", objectStoreVO); return objectStoreVO; } } diff --git a/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java b/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java index f1c7c38b8dc..60494dcb05c 100644 --- a/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java +++ b/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java @@ -128,8 +128,7 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation { for (StoragePoolVO sp : spes) { if (sp.getParent() != pool.getParent() && sp.getId() != pool.getParent()) { // If Datastore cluster is tried to prepare for maintenance then child storage pools are also kept in PrepareForMaintenance mode if (sp.getStatus() == StoragePoolStatus.PrepareForMaintenance) { - throw new CloudRuntimeException("Only one storage pool in a cluster can be in PrepareForMaintenance mode, " + sp.getId() + - " is already in PrepareForMaintenance mode "); + throw 
new CloudRuntimeException(String.format("Only one storage pool in a cluster can be in PrepareForMaintenance mode, %s is already in PrepareForMaintenance mode ", sp)); } } } @@ -172,7 +171,7 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation { logger.debug("ModifyStoragePool false succeeded"); } if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster) { - logger.debug(String.format("Started synchronising datastore cluster storage pool %s with vCenter", pool.getUuid())); + logger.debug("Started synchronising datastore cluster storage pool {} with vCenter", pool); storageManager.syncDatastoreClusterStoragePool(pool.getId(), ((ModifyStoragePoolAnswer) answer).getDatastoreClusterChildren(), host.getId()); } } @@ -348,7 +347,7 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation { logger.debug("ModifyStoragePool add succeeded"); } if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster) { - logger.debug(String.format("Started synchronising datastore cluster storage pool %s with vCenter", pool.getUuid())); + logger.debug("Started synchronising datastore cluster storage pool {} with vCenter", pool); storageManager.syncDatastoreClusterStoragePool(pool.getId(), ((ModifyStoragePoolAnswer) answer).getDatastoreClusterChildren(), host.getId()); } } diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 689d159905f..2772ce0944a 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -845,7 +845,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } if (snapshotCheck.getState() != Snapshot.State.BackedUp) { - throw new InvalidParameterValueException("Snapshot id=" + snapshotId + " is not in " + Snapshot.State.BackedUp + " state yet and can't be used for volume creation"); + throw new 
InvalidParameterValueException(String.format("Snapshot %s is not in %s state yet and can't be used for volume creation", snapshotCheck, Snapshot.State.BackedUp)); } SnapshotDataStoreVO snapshotStore = _snapshotDataStoreDao.findOneBySnapshotAndDatastoreRole(snapshotId, DataStoreRole.Primary); @@ -922,7 +922,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic // Check if zone is disabled if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getId())) { - throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId); + throw new PermissionDeniedException(String.format("Cannot perform this operation, Zone: %s is currently disabled", zone)); } // If local storage is disabled then creation of volume with local disk @@ -1054,10 +1054,11 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic created = false; VolumeInfo vol = volFactory.getVolume(cmd.getEntityId()); vol.stateTransit(Volume.Event.DestroyRequested); - throw new CloudRuntimeException("Failed to create volume: " + volume.getUuid(), e); + throw new CloudRuntimeException(String.format("Failed to create volume: %s", volume), e); } finally { if (!created) { - logger.trace("Decrementing volume resource count for account id=" + volume.getAccountId() + " as volume failed to create on the backend"); + VolumeVO finalVolume = volume; + logger.trace("Decrementing volume resource count for account {} as volume failed to create on the backend", () -> _accountMgr.getAccount(finalVolume.getAccountId())); _resourceLimitMgr.decrementVolumeResourceCount(volume.getAccountId(), cmd.getDisplayVolume(), volume.getSize(), _diskOfferingDao.findByIdIncludingRemoved(volume.getDiskOfferingId())); } @@ -1595,7 +1596,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic cleanVolumesCache(volume); return true; } catch (InterruptedException | 
ExecutionException e) { - logger.warn("Failed to expunge volume: " + volume.getUuid(), e); + logger.warn("Failed to expunge volume: {}", volume, e); return false; } } @@ -1678,14 +1679,13 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic private void expungeVolumesInPrimaryOrSecondary(VolumeVO volume, DataStoreRole role) throws InterruptedException, ExecutionException { if (!canAccessVolumeStore(volume, role)) { - logger.debug(String.format("Cannot access the storage pool with role: %s " + - "for the volume: %s, skipping expunge from storage", - role.name(), volume.getName())); + logger.debug("Cannot access the storage pool with role: {} " + + "for the volume: {}, skipping expunge from storage", role.name(), volume); return; } VolumeInfo volOnStorage = volFactory.getVolume(volume.getId(), role); if (volOnStorage != null) { - logger.info("Expunging volume " + volume.getId() + " from " + role + " data store"); + logger.info("Expunging volume {} from {} data store", volume, role); AsyncCallFuture future = volService.expungeVolumeAsync(volOnStorage); VolumeApiResult result = future.get(); if (result.isFailed()) { @@ -1722,7 +1722,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic return; } for (VolumeInfo volOnCache : cacheVols) { - logger.info("Delete volume from image cache store: " + volOnCache.getDataStore().getName()); + logger.info("Delete volume from image cache store: {}", volOnCache.getDataStore()); volOnCache.delete(); } } @@ -1773,7 +1773,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic stateTransitTo(volume, Volume.Event.DestroyRequested); stateTransitTo(volume, Volume.Event.OperationSucceeded); } catch (NoTransitionException e) { - logger.debug("Failed to destroy volume" + volume.getId(), e); + logger.debug("Failed to destroy volume {}", volume, e); return null; } _resourceLimitMgr.decrementVolumeResourceCount(volume.getAccountId(), volume.isDisplay(), 
@@ -1781,7 +1781,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic return volume; } if (!deleteVolumeFromStorage(volume, caller)) { - logger.warn("Failed to expunge volume: " + volumeId); + logger.warn("Failed to expunge volume: {}", volume); return null; } removeVolume(volume.getId()); @@ -1806,7 +1806,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic final VolumeVO volume = _volsDao.findById(volumeId); if (volume == null) { - throw new InvalidParameterValueException("Unable to find a volume with id " + volume); + throw new InvalidParameterValueException(String.format("Unable to find a volume with id %d", volumeId)); } // When trying to expunge, permission is denied when the caller is not an admin and the AllowUserExpungeRecoverVolume is false for the caller. @@ -1831,8 +1831,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic _volsDao.detachVolume(volume.getId()); stateTransitTo(volume, Volume.Event.RecoverRequested); } catch (NoTransitionException e) { - logger.debug("Failed to recover volume" + volume.getId(), e); - throw new CloudRuntimeException("Failed to recover volume" + volume.getId(), e); + logger.debug("Failed to recover volume {}", volume, e); + throw new CloudRuntimeException(String.format("Failed to recover volume %s", volume), e); } _resourceLimitMgr.incrementVolumeResourceCount(volume.getAccountId(), volume.isDisplay(), volume.getSize(), _diskOfferingDao.findById(volume.getDiskOfferingId())); @@ -1856,7 +1856,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic .publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), offeringId, volume.getTemplateId(), volume.getSize(), Volume.class.getName(), volume.getUuid(), volume.isDisplay()); - logger.debug(String.format("Volume [%s] has been successfully recovered, thus a new usage event %s has 
been published.", volume.getUuid(), EventTypes.EVENT_VOLUME_CREATE)); + logger.debug("Volume [{}] has been successfully recovered, thus a new usage event {} has been published.", volume, EventTypes.EVENT_VOLUME_CREATE); } @Override @@ -2071,7 +2071,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic /* If this volume has never been beyond allocated state, short circuit everything and simply update the database. */ // We need to publish this event to usage_volume table if (volume.getState() == Volume.State.Allocated) { - logger.debug(String.format("Volume %s is in the allocated state, but has never been created. Simply updating database with new size and IOPS.", volume.getUuid())); + logger.debug("Volume {} is in the allocated state, but has never been created. Simply updating database with new size and IOPS.", volume); volume.setSize(newSize); volume.setMinIops(newMinIops); @@ -2121,12 +2121,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic if (volumeMigrateRequired) { if (CollectionUtils.isEmpty(poolsPair.first()) && CollectionUtils.isEmpty(poolsPair.second())) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Volume change offering operation failed for volume ID: %s as no suitable pool(s) found for migrating to support new disk offering", volume.getUuid())); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Volume change offering operation failed for volume: %s as no suitable pool(s) found for migrating to support new disk offering", volume)); } final Long newSizeFinal = newSize; List suitableStoragePoolsWithEnoughSpace = suitableStoragePools.stream().filter(pool -> storageMgr.storagePoolHasEnoughSpaceForResize(pool, 0L, newSizeFinal)).collect(Collectors.toList()); if (CollectionUtils.isEmpty(suitableStoragePoolsWithEnoughSpace)) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Volume change offering operation 
failed for volume ID: %s as no suitable pool(s) with enough space found for volume migration.", volume.getUuid())); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Volume change offering operation failed for volume: %s as no suitable pool(s) with enough space found for volume migration.", volume)); } Collections.shuffle(suitableStoragePoolsWithEnoughSpace); MigrateVolumeCmd migrateVolumeCmd = new MigrateVolumeCmd(volume.getId(), suitableStoragePoolsWithEnoughSpace.get(0).getId(), newDiskOffering.getId(), true); @@ -2134,10 +2134,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic Volume result = migrateVolume(migrateVolumeCmd); volume = (result != null) ? _volsDao.findById(result.getId()) : null; if (volume == null) { - throw new CloudRuntimeException(String.format("Volume change offering operation failed for volume ID: %s migration failed to storage pool %s", volume.getUuid(), suitableStoragePools.get(0).getId())); + throw new CloudRuntimeException(String.format("Volume change offering operation failed for volume: %s migration failed to storage pool %s", volume, suitableStoragePools.get(0))); } } catch (Exception e) { - throw new CloudRuntimeException(String.format("Volume change offering operation failed for volume ID: %s migration failed to storage pool %s due to %s", volume.getUuid(), suitableStoragePools.get(0).getId(), e.getMessage())); + throw new CloudRuntimeException(String.format("Volume change offering operation failed for volume: %s migration failed to storage pool %s due to %s", volume, suitableStoragePools.get(0), e.getMessage())); } } @@ -2464,13 +2464,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } } if (logger.isTraceEnabled()) { - String msg = "attaching volume %s/%s to a VM (%s/%s) with an existing volume %s/%s on primary storage %s"; if (existingVolumeOfVm != null) { - logger.trace(String.format(msg, - volumeToAttach.getName(), 
volumeToAttach.getUuid(), - vm.getName(), vm.getUuid(), - existingVolumeOfVm.getName(), existingVolumeOfVm.getUuid(), - existingVolumeOfVm.getPoolId())); + logger.trace("attaching volume {} to a VM {} with an existing volume {} on primary storage {}", + volumeToAttach, vm, existingVolumeOfVm, _storagePoolDao.findById(existingVolumeOfVm.getPoolId())); } } @@ -2484,7 +2480,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic if (existingVolumeOfVm != null && !existingVolumeOfVm.getState().equals(Volume.State.Allocated)) { destPrimaryStorage = _storagePoolDao.findById(existingVolumeOfVm.getPoolId()); if (logger.isTraceEnabled() && destPrimaryStorage != null) { - logger.trace(String.format("decided on target storage: %s/%s", destPrimaryStorage.getName(), destPrimaryStorage.getUuid())); + logger.trace("decided on target storage: {}", destPrimaryStorage); } } @@ -2567,8 +2563,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic StoragePoolVO volumeToAttachStoragePool = _storagePoolDao.findById(volumeToAttach.getPoolId()); if (logger.isTraceEnabled() && volumeToAttachStoragePool != null) { - logger.trace(String.format("volume to attach (%s/%s) has a primary storage assigned to begin with (%s/%s)", - volumeToAttach.getName(), volumeToAttach.getUuid(), volumeToAttachStoragePool.getName(), volumeToAttachStoragePool.getUuid())); + logger.trace("volume to attach {} has a primary storage assigned to begin with {}", + volumeToAttach, volumeToAttachStoragePool); } checkForMatchingHypervisorTypesIf(volumeToAttachStoragePool != null && !volumeToAttachStoragePool.isManaged(), rootDiskHyperType, volumeToAttachHyperType); @@ -2578,12 +2574,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic AsyncJob job = asyncExecutionContext.getJob(); if (logger.isInfoEnabled()) { - logger.info(String.format("Trying to attach volume [%s/%s] to VM instance [%s/%s], update async job-%s progress status", 
- volumeToAttach.getName(), - volumeToAttach.getUuid(), - vm.getName(), - vm.getUuid(), - job.getId())); + logger.info("Trying to attach volume [{}] to VM instance [{}], update async job-{} [{}] progress status", + volumeToAttach, vm, job.getId(), job); } DiskOfferingVO diskOffering = _diskOfferingDao.findById(volumeToAttach.getDiskOfferingId()); @@ -3005,10 +2997,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic AsyncJob job = asyncExecutionContext.getJob(); if (logger.isInfoEnabled()) { - logger.info(String.format("Trying to attach volume %s to VM instance %s, update async job-%s progress status", - ReflectionToStringBuilderUtils.reflectOnlySelectedFields(volume, "name", "uuid"), - ReflectionToStringBuilderUtils.reflectOnlySelectedFields(vm, "name", "uuid"), - job.getId())); + logger.info("Trying to attach volume {} to VM instance {}, update async job-{} progress status", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields(volume, "id", "name", "uuid"), + ReflectionToStringBuilderUtils.reflectOnlySelectedFields(vm, "id", "name", "uuid"), + job.getId()); } _jobMgr.updateAsyncJobAttachment(job.getId(), "Volume", volumeId); @@ -3144,7 +3136,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic volumeVO.setPoolId(storagePoolVO.getId()); _volsDao.update(volumeVO.getId(), volumeVO); } else { - logger.warn(String.format("Unable to find datastore %s while updating the new datastore of the volume %d", datastoreName, volumeId)); + logger.warn("Unable to find datastore {} while updating the new datastore of the volume {}", datastoreName, volume); } } @@ -3228,19 +3220,19 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic cmd.setAdd(false); cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC); - sendModifyTargetsCommand(cmd, hostId); + sendModifyTargetsCommand(cmd, host); } } - private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long 
hostId) { - Answer answer = _agentMgr.easySend(hostId, cmd); + private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, Host host) { + Answer answer = _agentMgr.easySend(host.getId(), cmd); if (answer == null) { String msg = "Unable to get an answer to the modify targets command"; logger.warn(msg); } else if (!answer.getResult()) { - String msg = "Unable to modify target on the following host: " + hostId; + String msg = String.format("Unable to modify target on the following host: %s", host); logger.warn(msg); } @@ -3555,11 +3547,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic if (volume.getSize() != newDiskOffering.getDiskSize()) { DiskOfferingVO oldDiskOffering = this._diskOfferingDao.findById(volume.getDiskOfferingId()); - logger.warn(String.format( - "You are migrating a volume [id=%s] and changing the disk offering[from id=%s to id=%s] to reflect this migration. However, the sizes of the volume and the new disk offering are different.", - volume.getUuid(), oldDiskOffering.getUuid(), newDiskOffering.getUuid())); + logger.warn("You are migrating a volume [{}] and changing the disk offering[from {} to {}] to reflect this migration. However, the sizes of the volume and the new disk offering are different.", + volume, oldDiskOffering, newDiskOffering); } - logger.info(String.format("Changing disk offering to [uuid=%s] while migrating volume [uuid=%s, name=%s].", newDiskOffering.getUuid(), volume.getUuid(), volume.getName())); + logger.info("Changing disk offering to [{}] while migrating volume [{}].", newDiskOffering, volume); } /** @@ -3736,7 +3727,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic _accountMgr.checkAccess(caller, null, true, volume); if (volume.getState() != Volume.State.Ready) { - throw new InvalidParameterValueException("VolumeId: " + volumeId + " is not in " + Volume.State.Ready + " state but " + volume.getState() + ". 
Cannot take snapshot."); + throw new InvalidParameterValueException(String.format("Volume: %s is not in %s state but %s. Cannot take snapshot.", volume.getVolume(), Volume.State.Ready, volume.getState())); } StoragePoolVO storagePoolVO = _storagePoolDao.findById(volume.getPoolId()); @@ -3817,7 +3808,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } if (volume.getState() != Volume.State.Ready) { - throw new InvalidParameterValueException("VolumeId: " + volumeId + " is not in " + Volume.State.Ready + " state but " + volume.getState() + ". Cannot take snapshot."); + throw new InvalidParameterValueException(String.format("Volume: %s is not in %s state but %s. Cannot take snapshot.", volume.getVolume(), Volume.State.Ready, volume.getState())); } boolean isSnapshotOnStorPoolOnly = volume.getStoragePoolType() == StoragePoolType.StorPool && BooleanUtils.toBoolean(_configDao.getValue("sp.bypass.secondary.storage")); @@ -3869,11 +3860,11 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } if (volume.getState() != Volume.State.Ready) { - throw new InvalidParameterValueException("VolumeId: " + volumeId + " is not in " + Volume.State.Ready + " state but " + volume.getState() + ". Cannot take snapshot."); + throw new InvalidParameterValueException(String.format("Volume: %s is not in %s state but %s. 
Cannot take snapshot.", volume.getVolume(), Volume.State.Ready, volume.getState())); } if (ImageFormat.DIR.equals(volume.getFormat())) { - throw new InvalidParameterValueException("Snapshot not supported for volume:" + volumeId); + throw new InvalidParameterValueException(String.format("Snapshot not supported for volume: %s", volume.getVolume())); } if (volume.getTemplateId() != null) { VMTemplateVO template = _templateDao.findById(volume.getTemplateId()); @@ -3883,7 +3874,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic userVmVO = _userVmDao.findById(instanceId); } if (!isOperationSupported(template, userVmVO)) { - throw new InvalidParameterValueException("VolumeId: " + volumeId + " is for System VM , Creating snapshot against System VM volumes is not supported"); + throw new InvalidParameterValueException(String.format("Volume: %s is for System VM , Creating snapshot against System VM volumes is not supported", volume.getVolume())); } } @@ -3899,7 +3890,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic StoragePool storagePool = (StoragePool)volume.getDataStore(); if (storagePool == null) { - throw new InvalidParameterValueException("VolumeId: " + volumeId + " please attach this volume to a VM before create snapshot for it"); + throw new InvalidParameterValueException(String.format("Volume: %s please attach this volume to a VM before create snapshot for it", volume.getVolume())); } if (CollectionUtils.isNotEmpty(zoneIds)) { @@ -3949,7 +3940,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic _accountMgr.checkAccess(caller, null, true, volume); VirtualMachine attachVM = volume.getAttachedVM(); if (attachVM == null || attachVM.getId() != vm.getId()) { - throw new InvalidParameterValueException("Creating snapshot failed due to volume:" + volumeId + " doesn't attach to vm :" + vm); + throw new InvalidParameterValueException(String.format("Creating snapshot failed 
due to volume:%s doesn't attach to vm :%s", volume.getVolume(), vm)); } DataCenter zone = _dcDao.findById(volume.getDataCenterId()); @@ -3962,7 +3953,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } if (volume.getState() != Volume.State.Ready) { - throw new InvalidParameterValueException("VolumeId: " + volumeId + " is not in " + Volume.State.Ready + " state but " + volume.getState() + ". Cannot take snapshot."); + throw new InvalidParameterValueException(String.format("Volume: %s is not in %s state but %s. Cannot take snapshot.", volume.getVolume(), Volume.State.Ready, volume.getState())); } if (volume.getTemplateId() != null) { @@ -3973,13 +3964,13 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic userVmVO = _userVmDao.findById(instanceId); } if (!isOperationSupported(template, userVmVO)) { - throw new InvalidParameterValueException("VolumeId: " + volumeId + " is for System VM , Creating snapshot against System VM volumes is not supported"); + throw new InvalidParameterValueException(String.format("Volume: %s is for System VM , Creating snapshot against System VM volumes is not supported", volume.getVolume())); } } StoragePool storagePool = (StoragePool)volume.getDataStore(); if (storagePool == null) { - throw new InvalidParameterValueException("VolumeId: " + volumeId + " please attach this volume to a VM before create snapshot for it"); + throw new InvalidParameterValueException(String.format("Volume: %s please attach this volume to a VM before create snapshot for it", volume.getVolume())); } if (storagePool.getPoolType() == Storage.StoragePoolType.PowerFlex) { @@ -4026,7 +4017,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic // Extract activity only for detached volumes or for volumes whose // instance is stopped if (volume.getInstanceId() != null && ApiDBUtils.findVMInstanceById(volume.getInstanceId()).getState() != State.Stopped) { - 
logger.debug("Invalid state of the volume with ID: " + volumeId + ". It should be either detached or the VM should be in stopped state."); + logger.debug("Invalid state of the volume: {}. It should be either detached or the VM should be in stopped state.", volume); PermissionDeniedException ex = new PermissionDeniedException("Invalid state of the volume with specified ID. It should be either detached or the VM should be in stopped state."); ex.addProxyObject(volume.getUuid(), "volumeId"); throw ex; @@ -4199,7 +4190,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic throw new InvalidParameterValueException(String.format("No volume was found with UUID [%s].", volumeUuid)); } - String volumeToString = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(volume, "name", "uuid"); + String volumeToString = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(volume, "id", "name", "uuid"); if (volume.getInstanceId() != null) { VMInstanceVO vmInstanceVo = _vmInstanceDao.findById(volume.getInstanceId()); @@ -4260,8 +4251,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic if (volumeStoreRef != null && volumeStoreRef.getExtractUrl() != null) { return Optional.ofNullable(volumeStoreRef.getExtractUrl()); } else if (volumeStoreRef != null) { - logger.debug("volume " + volumeId + " is already installed on secondary storage, install path is " + - volumeStoreRef.getInstallPath()); + logger.debug("volume {} is already installed on secondary storage, install path is {}", volume, volumeStoreRef.getInstallPath()); VolumeInfo destVol = volFactory.getVolume(volumeId, DataStoreRole.Image); if (destVol == null) { throw new CloudRuntimeException("Failed to find the volume on a secondary store"); @@ -4348,12 +4338,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic Scope storeForExistingStoreScope = storeForExistingVol.getScope(); if (storeForExistingStoreScope == null) { - throw 
new CloudRuntimeException("Can't get scope of data store: " + storeForExistingVol.getId()); + throw new CloudRuntimeException(String.format("Can't get scope of data store: %s", storeForExistingVol)); } Scope storeForNewStoreScope = storeForNewVol.getScope(); if (storeForNewStoreScope == null) { - throw new CloudRuntimeException("Can't get scope of data store: " + storeForNewVol.getId()); + throw new CloudRuntimeException(String.format("Can't get scope of data store: %s", storeForNewVol)); } if (storeForNewStoreScope.getScopeType() == ScopeType.ZONE) { @@ -4411,7 +4401,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic return; } - final String error = "Volume: " + volumeToAttach.getName() + " is in " + volumeToAttach.getState() + ". It should be in Ready or Allocated state"; + final String error = String.format("Volume: %s is in %s. It should be in Ready or Allocated state", volumeToAttach, volumeToAttach.getState()); logger.error(error); throw new CloudRuntimeException(error); } @@ -4490,7 +4480,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic AttachAnswer answer = null; StoragePoolVO volumeToAttachStoragePool = _storagePoolDao.findById(volumeToAttach.getPoolId()); if (logger.isTraceEnabled() && volumeToAttachStoragePool != null) { - logger.trace(String.format("storage is gotten from volume to attach: %s/%s",volumeToAttachStoragePool.getName(),volumeToAttachStoragePool.getUuid())); + logger.trace("storage is gotten from volume to attach: {}", volumeToAttachStoragePool); } HostVO host = getHostForVmVolumeAttachDetach(vm, volumeToAttachStoragePool); Long hostId = host != null ? 
host.getId() : null; @@ -4520,7 +4510,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic try { volService.checkAndRepairVolumeBasedOnConfig(volFactory.getVolume(volumeToAttach.getId()), host); } catch (Exception e) { - logger.debug(String.format("Unable to check and repair volume [%s] on host [%s], due to %s.", volumeToAttach.getName(), host, e.getMessage())); + logger.debug("Unable to check and repair volume [{}] on host [{}], due to {}.", volumeToAttach, host, e.getMessage()); } try { @@ -4591,7 +4581,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic controllerInfo.put(VmDetailConstants.ROOT_DISK_CONTROLLER, vm.getDetail(VmDetailConstants.ROOT_DISK_CONTROLLER)); controllerInfo.put(VmDetailConstants.DATA_DISK_CONTROLLER, vm.getDetail(VmDetailConstants.DATA_DISK_CONTROLLER)); cmd.setControllerInfo(controllerInfo); - logger.debug("Attach volume id:" + volumeToAttach.getId() + " on VM id:" + vm.getId() + " has controller info:" + controllerInfo); + logger.debug("Attach volume {} on VM {} has controller info: {}", volumeToAttach, vm, controllerInfo); try { answer = (AttachAnswer)_agentMgr.send(hostId, cmd); @@ -4674,10 +4664,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic VolumeInfo volInfo = volFactory.getVolume(volumeToAttach.getId()); if (attached) { ev = Volume.Event.OperationSucceeded; - logger.debug("Volume: " + volInfo.getName() + " successfully attached to VM: " + volInfo.getAttachedVmName()); + logger.debug("Volume: {} successfully attached to VM: {}", volInfo.getVolume(), volInfo.getAttachedVM()); provideVMInfo(dataStore, vm.getId(), volInfo.getId()); } else { - logger.debug("Volume: " + volInfo.getName() + " failed to attach to VM: " + volInfo.getAttachedVmName()); + logger.debug("Volume: {} failed to attach to VM: {}", volInfo.getVolume(), volInfo.getAttachedVM()); } volInfo.stateTransit(ev); } @@ -4764,7 +4754,7 @@ public class 
VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } for (VolumeVO vol : vols) { if (vol.getDeviceId().equals(deviceId)) { - throw new RuntimeException("deviceId " + deviceId + " is used by vm " + vm.getId()); + throw new RuntimeException(String.format("deviceId %d is used by vol %s on vm %s", deviceId, vol, vm)); } } } else { @@ -4778,7 +4768,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic devIds.remove(vol.getDeviceId().toString().trim()); } if (devIds.isEmpty()) { - throw new RuntimeException("All device Ids are used by vm " + vm.getId()); + throw new RuntimeException(String.format("All device Ids are used by vm %s", vm)); } deviceId = Long.parseLong(devIds.iterator().next()); } @@ -4913,7 +4903,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); AsyncJobVO jobVo = _jobMgr.getAsyncJob(workJob.getId()); - logger.debug("New job " + workJob.getId() + ", result field: " + jobVo.getResult()); + logger.debug("New job {}, result field: {}", workJob, jobVo.getResult()); AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId()); diff --git a/server/src/main/java/com/cloud/storage/download/DownloadListener.java b/server/src/main/java/com/cloud/storage/download/DownloadListener.java index bd0c0eff1bc..488e77ede29 100644 --- a/server/src/main/java/com/cloud/storage/download/DownloadListener.java +++ b/server/src/main/java/com/cloud/storage/download/DownloadListener.java @@ -203,11 +203,13 @@ public class DownloadListener implements Listener { } public void logDisconnect() { - logger.warn("Unable to monitor download progress of " + object.getType() + ": " + object.getId() + " at host " + _ssAgent.getId()); + logger.warn("Unable to monitor download progress of {} : uuid: {}({}) at host [id: {}, uuid: {}]", + object.getType(), object.getUuid(), object, _ssAgent.getId(), _ssAgent.getUuid()); } 
public void log(String message, Level level) { - logger.log(level, message + ", " + object.getType() + ": " + object.getId() + " at host " + _ssAgent.getId()); + logger.log(level, "{}, {}: {}({}) at host [id: {}, uuid: {}]", + message, object.getType(), object.getUuid(), object, _ssAgent.getId(), _ssAgent.getUuid()); } public DownloadListener(DownloadMonitorImpl monitor) { diff --git a/server/src/main/java/com/cloud/storage/download/DownloadMonitorImpl.java b/server/src/main/java/com/cloud/storage/download/DownloadMonitorImpl.java index d21257516e2..67d5b091a03 100644 --- a/server/src/main/java/com/cloud/storage/download/DownloadMonitorImpl.java +++ b/server/src/main/java/com/cloud/storage/download/DownloadMonitorImpl.java @@ -171,7 +171,7 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor try { ep.sendMessageAsync(dcmd, new UploadListener.Callback(ep.getId(), dl)); } catch (Exception e) { - logger.warn("Unable to start /resume download of template " + template.getId() + " to " + store.getName(), e); + logger.warn("Unable to start /resume download of template {} to {}", template, store, e); dl.setDisconnected(); dl.scheduleStatusCheck(RequestType.GET_OR_RESTART); } @@ -228,7 +228,7 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor try { ep.sendMessageAsync(dcmd, new UploadListener.Callback(ep.getId(), dl)); } catch (Exception e) { - logger.warn("Unable to start /resume download of snapshot " + snapshot.getId() + " to " + store.getName(), e); + logger.warn("Unable to start /resume download of snapshot {} to {}", snapshot, store, e); dl.setDisconnected(); dl.scheduleStatusCheck(RequestType.GET_OR_RESTART); } @@ -305,7 +305,7 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor try { ep.sendMessageAsync(dcmd, new UploadListener.Callback(ep.getId(), dl)); } catch (Exception e) { - logger.warn("Unable to start /resume download of volume " + volume.getId() + " to " + 
store.getName(), e); + logger.warn("Unable to start /resume download of volume {} to {}", volume, store, e); dl.setDisconnected(); dl.scheduleStatusCheck(RequestType.GET_OR_RESTART); } diff --git a/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java b/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java index 78221d65d59..a0e10c646b5 100644 --- a/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java +++ b/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java @@ -125,18 +125,18 @@ public class StoragePoolMonitor implements Listener { } if (pool.getPoolType() == StoragePoolType.OCFS2 && !_ocfs2Mgr.prepareNodes(pool.getClusterId())) { - throw new ConnectionException(true, "Unable to prepare OCFS2 nodes for pool " + pool.getId()); + throw new ConnectionException(true, String.format("Unable to prepare OCFS2 nodes for pool %s", pool)); } Long hostId = host.getId(); if (logger.isDebugEnabled()) { - logger.debug("Host " + hostId + " connected, connecting host to shared pool id " + pool.getId() + " and sending storage pool information ..."); + logger.debug("Host {} connected, connecting host to shared pool {} and sending storage pool information ...", host, pool); } try { - _storageManager.connectHostToSharedPool(hostId, pool.getId()); + _storageManager.connectHostToSharedPool(host, pool.getId()); _storageManager.createCapacityEntry(pool.getId()); } catch (Exception e) { - throw new ConnectionException(true, "Unable to connect host " + hostId + " to storage pool id " + pool.getId() + " due to " + e.toString(), e); + throw new ConnectionException(true, String.format("Unable to connect host %s to storage pool %s due to %s", host, pool, e.toString()), e); } } } @@ -145,9 +145,14 @@ public class StoragePoolMonitor implements Listener { @Override public synchronized boolean processDisconnect(long agentId, Status state) { + return processDisconnect(agentId, null, null, state); + } + + @Override + public 
synchronized boolean processDisconnect(long agentId, String uuid, String name, Status state) { Host host = _storageManager.getHost(agentId); if (host == null) { - logger.warn("Agent: " + agentId + " not found, not disconnecting pools"); + logger.warn("Agent [id: {}, uuid: {}, name: {}] not found, not disconnecting pools", agentId, uuid, name); return false; } @@ -158,7 +163,7 @@ public class StoragePoolMonitor implements Listener { List storagePoolHosts = _storageManager.findStoragePoolsConnectedToHost(host.getId()); if (storagePoolHosts == null) { if (logger.isTraceEnabled()) { - logger.trace("No pools to disconnect for host: " + host.getId()); + logger.trace("No pools to disconnect for host: {}", host); } return true; } @@ -180,9 +185,9 @@ public class StoragePoolMonitor implements Listener { } try { - _storageManager.disconnectHostFromSharedPool(host.getId(), pool.getId()); + _storageManager.disconnectHostFromSharedPool(host, pool); } catch (Exception e) { - logger.error("Unable to disconnect host " + host.getId() + " from storage pool id " + pool.getId() + " due to " + e.toString()); + logger.error("Unable to disconnect host {} from storage pool {} due to {}", host, pool, e.toString()); disconnectResult = false; } } diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManager.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManager.java index dd63371b888..cce580d4106 100644 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManager.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManager.java @@ -16,6 +16,7 @@ // under the License. 
package com.cloud.storage.snapshot; +import com.cloud.user.Account; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.config.ConfigKey; @@ -68,7 +69,7 @@ public interface SnapshotManager extends Configurable { * @param accountId * The account which is to be deleted. */ - boolean deleteSnapshotDirsForAccount(long accountId); + boolean deleteSnapshotDirsForAccount(Account account); //void deleteSnapshotsDirForVolume(String secondaryStoragePoolUrl, Long dcId, Long accountId, Long volumeId); diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java index 50c8ff8b83a..572e0ace723 100755 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -349,7 +349,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement } if (Type.GROUP.name().equals(snapshot.getTypeDescription())) { - throw new InvalidParameterValueException(String.format("The snapshot [%s] is part of a [%s] snapshots and cannot be reverted separately", snapshotId, snapshot.getTypeDescription())); + throw new InvalidParameterValueException(String.format("The snapshot [%s] is part of a [%s] snapshots and cannot be reverted separately", snapshot, snapshot.getTypeDescription())); } VolumeVO volume = _volsDao.findById(snapshot.getVolumeId()); if (volume.getState() != Volume.State.Ready) { @@ -383,8 +383,8 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.REVERT); if (snapshotStrategy == null) { - logger.error("Unable to find snapshot strategy to handle snapshot with id '" + snapshotId + "'"); - String errorMsg = 
String.format("Revert snapshot command failed for snapshot with id %d, because this command is supported only for KVM hypervisor", snapshotId); + logger.error("Unable to find snapshot strategy to handle snapshot {}", snapshot); + String errorMsg = String.format("Revert snapshot command failed for snapshot %s, because this command is supported only for KVM hypervisor", snapshot); throw new CloudRuntimeException(errorMsg); } @@ -484,7 +484,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement SnapshotVO snapshot = _snapshotDao.findById(snapshotId); if (snapshot == null || snapshot.getRemoved() != null) { - logger.error("Unable to find active [{}].", snapshot); + logger.error("Unable to find active snapshot [{}] with id {}.", snapshot, snapshotId); throw new InvalidParameterValueException("Unable to find active snapshot."); } @@ -508,7 +508,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement for (DataStore store : imageStores) { snapshotDataStoreReference = _snapshotStoreDao.findByStoreSnapshot(DataStoreRole.Image, store.getId(), snapshotId); if (snapshotDataStoreReference == null) { - logger.trace("Snapshot [{}] not in store [{}].", snapshotId, store.getId()); + logger.trace("Snapshot [{}] not in store [{}].", snapshot, store); continue; } String existingExtractUrl = snapshotDataStoreReference.getExtractUrl(); @@ -517,12 +517,12 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement return existingExtractUrl; } chosenStore = (ImageStoreEntity) store; - logger.debug("Snapshot [{}] found in store [{}].", snapshotId, chosenStore.getId()); + logger.debug("Snapshot [{}] found in store [{}].", snapshot, chosenStore); break; } if (ObjectUtils.anyNull(chosenStore, snapshotDataStoreReference)) { - logger.error("Snapshot [{}] not found in any secondary storage.", snapshotId); + logger.error("Snapshot [{}] not found in any secondary storage.", snapshot); throw new 
InvalidParameterValueException("Snapshot not found."); } @@ -554,8 +554,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement try { snapshotSrv.deleteSnapshot(snapshotOnPrimary); } catch (Exception e) { - throw new CloudRuntimeException("Snapshot archived to Secondary Storage but there was an error deleting " - + " the snapshot on Primary Storage. Please manually delete the primary snapshot " + snapshotId, e); + throw new CloudRuntimeException(String.format("Snapshot archived to Secondary Storage but there was an error deleting the snapshot on Primary Storage. Please manually delete the primary snapshot %s", snapshotOnPrimary), e); } return snapshotOnSecondary; @@ -590,7 +589,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement } if (volume.getState() != Volume.State.Ready) { - throw new InvalidParameterValueException("VolumeId: " + volumeId + " is not in " + Volume.State.Ready + " state but " + volume.getState() + ". Cannot take snapshot."); + throw new InvalidParameterValueException("Volume: " + volume + " is not in " + Volume.State.Ready + " state but " + volume.getState() + ". 
Cannot take snapshot."); } DataStore store = volume.getDataStore(); @@ -618,7 +617,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement try { SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.BACKUP); if (snapshotStrategy == null) { - throw new CloudRuntimeException("Unable to find snapshot strategy to handle snapshot with id '" + snapshotId + "'"); + throw new CloudRuntimeException(String.format("Unable to find snapshot strategy to handle snapshot [%s]", snapshot)); } snapshotInfo = snapshotStrategy.backupSnapshot(snapshotInfo); @@ -707,12 +706,12 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement SnapshotVO oldestSnapshot = snaps.get(0); long oldSnapId = oldestSnapshot.getId(); if (policy != null) { - logger.debug("Max snaps: " + policy.getMaxSnaps() + " exceeded for snapshot policy with Id: " + policyId + ". Deleting oldest snapshot: " + oldSnapId); + logger.debug("Max snaps: {} exceeded for snapshot policy {}. 
Deleting oldest snapshot: {}", policy.getMaxSnaps(), policy, oldestSnapshot); } if (deleteSnapshot(oldSnapId, null)) { //log Snapshot delete event ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, oldestSnapshot.getAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_SNAPSHOT_DELETE, - "Successfully deleted oldest snapshot: " + oldSnapId, oldSnapId, ApiCommandResourceType.Snapshot.toString(), 0); + String.format("Successfully deleted oldest snapshot: %s", oldestSnapshot), oldSnapId, ApiCommandResourceType.Snapshot.toString(), 0); } snaps.remove(oldestSnapshot); } @@ -760,11 +759,11 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement } if (Type.GROUP.name().equals(snapshotCheck.getTypeDescription())) { - throw new InvalidParameterValueException(String.format("The snapshot [%s] is part of a [%s] snapshots and cannot be deleted separately", snapshotId, snapshotCheck.getTypeDescription())); + throw new InvalidParameterValueException(String.format("The snapshot [%s] is part of a [%s] snapshots and cannot be deleted separately", snapshotCheck, snapshotCheck.getTypeDescription())); } if (snapshotCheck.getState() == Snapshot.State.Destroyed) { - throw new InvalidParameterValueException("Snapshot with id: " + snapshotId + " is already destroyed"); + throw new InvalidParameterValueException(String.format("Snapshot [%s] is already destroyed", snapshotCheck)); } _accountMgr.checkAccess(caller, null, true, snapshotCheck); @@ -772,7 +771,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshotCheck, zoneId, SnapshotOperation.DELETE); if (snapshotStrategy == null) { - logger.error("Unable to find snapshot strategy to handle snapshot with id '" + snapshotId + "'"); + logger.error("Unable to find snapshot strategy to handle snapshot [{}]", snapshotCheck); return false; } @@ -806,7 +805,7 @@ public class SnapshotManagerImpl extends 
MutualExclusiveIdsManagerBase implement return result; } catch (Exception e) { - logger.debug("Failed to delete snapshot: " + snapshotCheck.getId() + ":" + e.toString()); + logger.debug("Failed to delete snapshot {}:{}", snapshotCheck, e.toString()); throw new CloudRuntimeException("Failed to delete snapshot:" + e.toString()); } @@ -932,8 +931,9 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement } @Override - public boolean deleteSnapshotDirsForAccount(long accountId) { + public boolean deleteSnapshotDirsForAccount(Account account) { + long accountId = account.getId(); List volumes = _volsDao.findIncludingRemovedByAccount(accountId); // The above call will list only non-destroyed volumes. // So call this method before marking the volumes as destroyed. @@ -963,11 +963,11 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement answer = ep.sendMessage(cmd); } if ((answer != null) && answer.getResult()) { - logger.debug("Deleted all snapshots for volume: " + volumeId + " under account: " + accountId); + logger.debug("Deleted all snapshots for volume {} under account {}", volume, account); } else { success = false; if (answer != null) { - logger.warn("Failed to delete all snapshot for volume " + volumeId + " on secondary storage " + ssHost.getUri()); + logger.warn("Failed to delete all snapshot for volume {} on secondary storage {}", volume, ssHost.getUri()); logger.error(answer.getDetails()); } } @@ -978,7 +978,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement for (SnapshotVO snapshot : snapshots) { SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.DELETE); if (snapshotStrategy == null) { - logger.error("Unable to find snapshot strategy to handle snapshot with id '" + snapshot.getId() + "'"); + logger.error("Unable to find snapshot strategy to handle snapshot [{}]", snapshot); continue; } List snapshotStoreRefs = 
_snapshotStoreDao.listReadyBySnapshot(snapshot.getId(), DataStoreRole.Image); @@ -1038,14 +1038,12 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement throw new UnsupportedOperationException(String.format("Encrypted volumes don't support snapshot schedules, cannot create snapshot policy for the volume [%s]", volume.getUuid())); } - String volumeDescription = volume.getVolumeDescription(); - final Account caller = CallContext.current().getCallingAccount(); _accountMgr.checkAccess(caller, null, true, volume); // If display is false we don't actually schedule snapshots. if (volume.getState() != Volume.State.Ready && display) { - throw new InvalidParameterValueException("VolumeId: " + volumeId + " is not in " + Volume.State.Ready + " state but " + volume.getState() + ". Cannot take snapshot."); + throw new InvalidParameterValueException(String.format("Volume: %s is not in %s state but %s. Cannot take snapshot.", volume, Volume.State.Ready, volume.getState())); } if (volume.getTemplateId() != null) { @@ -1056,7 +1054,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement userVmVO = _vmDao.findById(instanceId); } if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM && (userVmVO == null || !UserVmManager.CKS_NODE.equals(userVmVO.getUserVmType()) || !UserVmManager.SHAREDFSVM.equals(userVmVO.getUserVmType()))) { - throw new InvalidParameterValueException("VolumeId: " + volumeId + " is for System VM , Creating snapshot against System VM volumes is not supported"); + throw new InvalidParameterValueException(String.format("Volume: %s is for System VM , Creating snapshot against System VM volumes is not supported", volume)); } } @@ -1067,7 +1065,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement // It is not detached, but attached to a VM if (_vmDao.findById(instanceId) == null) { // It is not a UserVM but a SystemVM or DomR - throw new 
InvalidParameterValueException(String.format("Failed to create snapshot policy [%s] for volume %s; Snapshots of volumes attached to System or router VM are not allowed.", intervalType, volumeDescription)); + throw new InvalidParameterValueException(String.format("Failed to create snapshot policy [%s] for volume %s; Snapshots of volumes attached to System or router VM are not allowed.", intervalType, volume)); } } @@ -1081,8 +1079,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement TimeZone timeZone = TimeZone.getTimeZone(cmdTimezone); String timezoneId = timeZone.getID(); if (!timezoneId.equals(cmdTimezone)) { - logger.warn(String.format("Using timezone [%s] for running the snapshot policy [%s] for volume %s, as an equivalent of [%s].", timezoneId, intervalType, volumeDescription, - cmdTimezone)); + logger.warn("Using timezone [{}] for running the snapshot policy [{}] for volume {}, as an equivalent of [{}].", timezoneId, intervalType, volume, cmdTimezone); } String schedule = cmd.getSchedule(); @@ -1091,18 +1088,18 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement DateUtil.getNextRunTime(intvType, schedule, timezoneId, null); } catch (Exception e) { throw new InvalidParameterValueException(String.format("%s has an invalid schedule [%s] for interval type [%s].", - volumeDescription, schedule, intervalType)); + volume, schedule, intervalType)); } int maxSnaps = cmd.getMaxSnaps(); if (maxSnaps <= 0) { - throw new InvalidParameterValueException(String.format("maxSnaps [%s] for volume %s should be greater than 0.", maxSnaps, volumeDescription)); + throw new InvalidParameterValueException(String.format("maxSnaps [%s] for volume %s should be greater than 0.", maxSnaps, volume)); } int intervalMaxSnaps = type.getMax(); if (maxSnaps > intervalMaxSnaps) { - throw new InvalidParameterValueException(String.format("maxSnaps [%s] for volume %s exceeds limit [%s] for interval type [%s].", maxSnaps, 
volumeDescription, + throw new InvalidParameterValueException(String.format("maxSnaps [%s] for volume %s exceeds limit [%s] for interval type [%s].", maxSnaps, volume, intervalMaxSnaps, intervalType)); } @@ -1131,16 +1128,15 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement protected SnapshotPolicyVO persistSnapshotPolicy(VolumeVO volume, String schedule, String timezone, IntervalType intervalType, int maxSnaps, boolean display, boolean active, Map tags, List zoneIds) { long volumeId = volume.getId(); - String volumeDescription = volume.getVolumeDescription(); GlobalLock createSnapshotPolicyLock = GlobalLock.getInternLock("createSnapshotPolicy_" + volumeId); boolean isLockAcquired = createSnapshotPolicyLock.lock(5); if (!isLockAcquired) { - throw new CloudRuntimeException(String.format("Unable to acquire lock for creating snapshot policy [%s] for %s.", intervalType, volumeDescription)); + throw new CloudRuntimeException(String.format("Unable to acquire lock for creating snapshot policy [%s] for %s.", intervalType, volume)); } - logger.debug(String.format("Acquired lock for creating snapshot policy [%s] for volume %s.", intervalType, volumeDescription)); + logger.debug("Acquired lock for creating snapshot policy [{}] for volume {}.", intervalType, volume); try { SnapshotPolicyVO policy = _snapshotPolicyDao.findOneByVolumeInterval(volumeId, intervalType); @@ -1211,8 +1207,8 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement IntervalType[] intervalTypes = IntervalType.values(); List policies = listPoliciesforVolume(srcVolume.getId()); - logger.debug(String.format("Copying snapshot policies %s from volume %s to volume %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(policies, - "id", "uuid"), srcVolume.getVolumeDescription(), destVolume.getVolumeDescription())); + logger.debug("Copying snapshot policies {} from volume {} to volume {}.", 
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(policies, + "id", "uuid"), srcVolume, destVolume); for (SnapshotPolicyVO policy : policies) { List details = snapshotPolicyDetailsDao.findDetails(policy.getId(), ApiConstants.ZONE_ID); @@ -1410,8 +1406,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement UserVmVO userVm = _vmDao.findById(volume.getInstanceId()); if (userVm != null) { if (userVm.getState().equals(State.Destroyed) || userVm.getState().equals(State.Expunging)) { - throw new CloudRuntimeException("Creating snapshot failed due to volume:" + volume.getId() + " is associated with vm:" + userVm.getInstanceName() + " is in " - + userVm.getState().toString() + " state"); + throw new CloudRuntimeException(String.format("Creating snapshot failed due to volume: %s is associated with vm: %s is in %s state", volume, userVm, userVm.getState().toString())); } if (userVm.getHypervisorType() == HypervisorType.VMware || userVm.getHypervisorType() == HypervisorType.KVM) { @@ -1448,7 +1443,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.TAKE); if (snapshotStrategy == null) { - throw new CloudRuntimeException("Can't find snapshot strategy to deal with snapshot:" + snapshotId); + throw new CloudRuntimeException(String.format("Can't find snapshot strategy to deal with snapshot:%s", snapshot.getSnapshotVO())); } SnapshotInfo snapshotOnPrimary = snapshotStrategy.takeSnapshot(snapshot); @@ -1457,7 +1452,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement if (backupSnapToSecondary) { backupSnapshotToSecondary(payload.getAsyncBackup(), snapshotStrategy, snapshotOnPrimary, payload.getZoneIds()); } else { - logger.debug("skipping backup of snapshot [uuid=" + snapshot.getUuid() + "] to secondary due to configuration"); + logger.debug("skipping backup of snapshot 
[{}] to secondary due to configuration", snapshot); snapshotOnPrimary.markBackedUp(); } @@ -1469,7 +1464,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement List snapshotStoreRefs = _snapshotStoreDao.listReadyBySnapshot(snapshotId, dataStoreRole); if (CollectionUtils.isEmpty(snapshotStoreRefs)) { - throw new CloudRuntimeException(String.format("Could not find snapshot %s [%s] on [%s]", snapshot.getName(), snapshot.getUuid(), snapshot.getLocationType())); + throw new CloudRuntimeException(String.format("Could not find snapshot %s on [%s]", snapshot.getSnapshotVO(), snapshot.getLocationType())); } SnapshotDataStoreVO snapshotStoreRef = snapshotStoreRefs.get(0); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_CREATE, snapshot.getAccountId(), snapshot.getDataCenterId(), snapshotId, snapshot.getName(), null, null, @@ -1540,10 +1535,10 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement } } catch (final Exception e) { if (attempts >= 0) { - logger.debug("Backing up of snapshot failed, for snapshot with ID " + snapshot.getSnapshotId() + ", left with " + attempts + " more attempts"); + logger.debug("Backing up of snapshot failed, for snapshot {}, left with {} more attempts", snapshot, attempts); backupSnapshotExecutor.schedule(new BackupSnapshotTask(snapshot, --attempts, snapshotStrategy, zoneIds), snapshotBackupRetryInterval, TimeUnit.SECONDS); } else { - logger.debug("Done with " + snapshotBackupRetries + " attempts in backing up of snapshot with ID " + snapshot.getSnapshotId()); + logger.debug("Done with {} attempts in backing up of snapshot {}", snapshotBackupRetries, snapshot.getSnapshotVO()); snapshotSrv.cleanupOnSnapshotBackupFailure(snapshot); } } @@ -1591,10 +1586,10 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement for (SnapshotVO snapshotVO : snapshots) { try { if (!deleteSnapshot(snapshotVO.getId(), null)) { - logger.debug("Failed to delete snapshot 
in destroying state with id " + snapshotVO.getUuid()); + logger.debug("Failed to delete snapshot in destroying state: {}", snapshotVO); } } catch (Exception e) { - logger.debug("Failed to delete snapshot in destroying state with id " + snapshotVO.getUuid()); + logger.debug("Failed to delete snapshot in destroying state: {}", snapshotVO); } } return true; @@ -1677,7 +1672,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement snapshotSrv.deleteSnapshot(info); } } catch (CloudRuntimeException e) { - String msg = "Cleanup of Snapshot with uuid " + info.getUuid() + " in primary storage is failed. Ignoring"; + String msg = String.format("Cleanup of Snapshot %s in primary storage is failed. Ignoring", info); logger.warn(msg); } } @@ -1704,7 +1699,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement _resourceLimitMgr.checkResourceLimit(owner, ResourceType.secondary_storage, new Long(volume.getSize()).longValue()); } catch (ResourceAllocationException e) { if (snapshotType != Type.MANUAL) { - String msg = "Snapshot resource limit exceeded for account id : " + owner.getId() + ". Failed to create recurring snapshots"; + String msg = String.format("Snapshot resource limit exceeded for account %s. Failed to create recurring snapshots", owner); logger.warn(msg); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_UPDATE_RESOURCE_COUNT, 0L, 0L, msg, "Snapshot resource limit exceeded for account id : " + owner.getId() + ". 
Failed to create recurring snapshots; please use updateResourceLimit to increase the limit"); @@ -1748,7 +1743,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement SnapshotVO snapshot = _snapshotDao.persist(snapshotVO); if (snapshot == null) { - throw new CloudRuntimeException("Failed to create snapshot for volume: " + volume.getId()); + throw new CloudRuntimeException(String.format("Failed to create snapshot for volume: %s", volume)); } CallContext.current().putContextParameter(Snapshot.class, snapshot.getUuid()); _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.snapshot); @@ -1775,7 +1770,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement } if (dstSnapshotStore.getState() == ObjectInDataStoreStateMachine.State.Ready) { if (!dstSnapshotStore.isDisplay()) { - logger.debug(String.format("Snapshot ID: %d is in ready state on image store ID: %d, marking it displayable for view", snapshotId, dstSnapshotStore.getDataStoreId())); + logger.debug("Snapshot ID: {} is in ready state on image store: {}, marking it displayable for view", snapshotId, dstSecStore); dstSnapshotStore.setDisplay(true); _snapshotStoreDao.update(dstSnapshotStore.getId(), dstSnapshotStore); } @@ -1812,10 +1807,10 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement copyUrl = result.getPath(); } } catch (InterruptedException | ExecutionException | ResourceUnavailableException ex) { - logger.error(String.format("Failed to prepare URL for copy for snapshot ID: %d on store: %s", snapshotId, srcSecStore.getName()), ex); + logger.error("Failed to prepare URL for copy for snapshot ID: {} on store: {}", snapshotId, srcSecStore, ex); } if (StringUtils.isEmpty(copyUrl)) { - logger.error(String.format("Unable to prepare URL for copy for snapshot ID: %d on store: %s", snapshotId, srcSecStore.getName())); + logger.error("Unable to prepare URL for copy for snapshot ID: {} on store: {}", 
snapshotId, srcSecStore); return false; } logger.debug(String.format("Copying snapshot ID: %d to destination zones using download URL: %s", snapshotId, copyUrl)); @@ -1823,7 +1818,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement AsyncCallFuture future = snapshotSrv.copySnapshot(snapshotOnSecondary, copyUrl, dstSecStore); SnapshotResult result = future.get(); if (result.isFailed()) { - logger.debug(String.format("Copy snapshot ID: %d failed for image store %s: %s", snapshotId, dstSecStore.getName(), result.getResult())); + logger.debug("Copy snapshot ID: {} failed for image store {}: {}", snapshotId, dstSecStore, result.getResult()); return false; } snapshotZoneDao.addSnapshotToZone(snapshotId, dstZoneId); @@ -1835,7 +1830,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement } return true; } catch (InterruptedException | ExecutionException | ResourceUnavailableException ex) { - logger.debug(String.format("Failed to copy snapshot ID: %d to image store: %s", snapshotId, dstSecStore.getName())); + logger.debug("Failed to copy snapshot ID: {} to image store: {}", snapshotId, dstSecStore); } return false; } @@ -1852,9 +1847,8 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement do { dstSecStore = getSnapshotZoneImageStore(currentSnap.getSnapshotId(), destZone.getId()); if (dstSecStore != null) { - logger.debug(String.format("Snapshot ID: %d is already present in secondary storage: %s" + - " in zone %s in ready state, don't need to copy any further", - currentSnap.getSnapshotId(), dstSecStore.getName(), destZone)); + logger.debug("Snapshot {} is already present in secondary storage: {}" + + " in zone {} in ready state, don't need to copy any further", snapshotVO, dstSecStore, destZone); if (snapshotId == currentSnap.getSnapshotId()) { checkAndProcessSnapshotAlreadyExistInStore(snapshotId, dstSecStore); } @@ -1887,11 +1881,11 @@ public class SnapshotManagerImpl extends 
MutualExclusiveIdsManagerBase implement throw new StorageUnavailableException("Destination zone is not ready, no image store with free capacity", DataCenter.class, destZoneId); } } - logger.debug(String.format("Copying snapshot chain for snapshot ID: %d on secondary store: %s of zone ID: %d", snapshotId, dstSecStore.getName(), destZoneId)); + logger.debug("Copying snapshot chain for snapshot ID: {} on secondary store: {} of zone ID: {}", snapshotVO, dstSecStore, destZone); for (SnapshotDataStoreVO snapshotDataStoreVO : snapshotChain) { if (!copySnapshotToZone(snapshotDataStoreVO, srcSecStore, destZone, dstSecStore, account)) { - logger.error(String.format("Failed to copy snapshot: %s to zone: %s due to failure to copy snapshot ID: %d from snapshot chain", - snapshotVO, destZone, snapshotDataStoreVO.getSnapshotId())); + logger.error("Failed to copy snapshot: {} to zone: {} due to failure to copy snapshot ID: {} from snapshot chain", + snapshotVO, destZone, snapshotDataStoreVO.getSnapshotId()); return false; } } @@ -2001,7 +1995,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement String completedEventLevel = EventVO.LEVEL_ERROR; String completedEventMsg = String.format("Copying snapshot ID: %s failed", snapshotVO.getUuid()); if (dataStore == null) { - logger.error(String.format("Unable to find an image store for zone ID: %d where snapshot %s is in Ready state", zoneId, snapshotVO)); + logger.error("Unable to find an image store for zone: {} where snapshot {} is in Ready state", dataCenterDao.findById(zoneId), snapshotVO); ActionEventUtils.onCompletedActionEvent(CallContext.current().getCallingUserId(), CallContext.current().getCallingAccountId(), completedEventLevel, EventTypes.EVENT_SNAPSHOT_COPY, completedEventMsg, snapshotId, ApiCommandResourceType.Snapshot.toString(), startEventId); diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java 
b/server/src/main/java/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java index 2a53021636c..8d4fd0e7aed 100644 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java @@ -126,7 +126,7 @@ public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu nextTimestamp = DateUtil.getNextRunTime(type, schedule, timezone, currentTimestamp); final String currentTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, currentTimestamp); final String nextScheduledTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, nextTimestamp); - logger.debug("Current time is " + currentTime + ". NextScheduledTime of policyId " + policyId + " is " + nextScheduledTime); + logger.debug("Current time is {}. NextScheduledTime of policy {} is {}", currentTime, policy, nextScheduledTime); } return nextTimestamp; } @@ -220,7 +220,7 @@ public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu long diffInHours = TimeUnit.MILLISECONDS.toHours(now.getTime() - creationTime.getTime()); if (diffInHours >= expiration_interval_hours) { if (logger.isDebugEnabled()){ - logger.debug("Deleting expired VM snapshot id: " + vmSnapshot.getId()); + logger.debug("Deleting expired VM snapshot: {}", vmSnapshot); } _vmSnaphostManager.deleteVMSnapshot(vmSnapshot.getId()); } @@ -240,9 +240,8 @@ public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu final long snapshotScheId = snapshotToBeExecuted.getId(); final long policyId = snapshotToBeExecuted.getPolicyId(); final long volumeId = snapshotToBeExecuted.getVolumeId(); + final VolumeVO volume = _volsDao.findByIdIncludingRemoved(snapshotToBeExecuted.getVolumeId()); try { - final VolumeVO volume = _volsDao.findByIdIncludingRemoved(snapshotToBeExecuted.getVolumeId()); - if (!canSnapshotBeScheduled(snapshotToBeExecuted, volume)) { continue; } @@ -252,7 +251,7 @@ public class 
SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu ActionEventUtils.onScheduledActionEvent(User.UID_SYSTEM, volume.getAccountId(), EventTypes.EVENT_SNAPSHOT_CREATE, "creating snapshot for volume Id:" + volume.getUuid(), volumeId, ApiCommandResourceType.Volume.toString(), true, 0); - logger.trace(String.format("Mapping parameters required to generate a CreateSnapshotCmd for snapshot [%s].", snapshotToBeExecuted.getUuid())); + logger.trace("Mapping parameters required to generate a CreateSnapshotCmd for snapshot [{}].", snapshotToBeExecuted); final Map params = new HashMap(); params.put(ApiConstants.VOLUME_ID, "" + volumeId); params.put(ApiConstants.POLICY_ID, "" + policyId); @@ -269,7 +268,7 @@ public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu } } - logger.trace(String.format("Generating a CreateSnapshotCmd for snapshot [%s] with parameters: [%s].", snapshotToBeExecuted.getUuid(), params.toString())); + logger.trace("Generating a CreateSnapshotCmd for snapshot [{}] with parameters: [{}].", snapshotToBeExecuted, params.toString()); final CreateSnapshotCmd cmd = new CreateSnapshotCmd(); ComponentContext.inject(cmd); _dispatcher.dispatchCreateCmd(cmd, params); @@ -278,18 +277,18 @@ public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu final Date scheduledTimestamp = snapshotToBeExecuted.getScheduledTimestamp(); displayTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, scheduledTimestamp); - logger.debug(String.format("Scheduling snapshot [%s] for volume [%s] at [%s].", snapshotToBeExecuted.getUuid(), volume.getVolumeDescription(), displayTime)); + logger.debug("Scheduling snapshot [{}] for volume [{}] at [{}].", snapshotToBeExecuted, volume, displayTime); AsyncJobVO job = new AsyncJobVO("", User.UID_SYSTEM, volume.getAccountId(), CreateSnapshotCmd.class.getName(), ApiGsonHelper.getBuilder().create().toJson(params), cmd.getEntityId(), cmd.getApiResourceType() != null ? 
cmd.getApiResourceType().toString() : null, null); job.setDispatcher(_asyncDispatcher.getName()); final long jobId = _asyncMgr.submitAsyncJob(job); - logger.debug(String.format("Scheduled snapshot [%s] for volume [%s] as job [%s].", snapshotToBeExecuted.getUuid(), volume.getVolumeDescription(), job.getUuid())); + logger.debug("Scheduled snapshot [{}] for volume [{}] as job [{}].", snapshotToBeExecuted, volume, job); tmpSnapshotScheduleVO.setAsyncJobId(jobId); _snapshotScheduleDao.update(snapshotScheId, tmpSnapshotScheduleVO); } catch (final Exception e) { - logger.error(String.format("The scheduling of snapshot [%s] for volume [%s] failed due to [%s].", snapshotToBeExecuted.getUuid(), volumeId, e.toString()), e); + logger.error("The scheduling of snapshot [{}] for volume [{}] failed due to [{}].", snapshotToBeExecuted, volume, e.toString(), e); } finally { if (tmpSnapshotScheduleVO != null) { _snapshotScheduleDao.releaseFromLockTable(snapshotScheId); @@ -307,14 +306,13 @@ public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu */ protected boolean canSnapshotBeScheduled(final SnapshotScheduleVO snapshotToBeScheduled, final VolumeVO volume) { if (volume.getRemoved() != null) { - logger.warn(String.format("Skipping snapshot [%s] for volume [%s] because it has been removed. Having a snapshot scheduled for a volume that has been " - + "removed is an inconsistency; please, check your database.", snapshotToBeScheduled.getUuid(), volume.getVolumeDescription())); + logger.warn("Skipping snapshot [{}] for volume [{}] because it has been removed. 
Having a snapshot scheduled for a volume that has been " + + "removed is an inconsistency; please, check your database.", snapshotToBeScheduled, volume); return false; } if (volume.getPoolId() == null) { - logger.debug(String.format("Skipping snapshot [%s] for volume [%s] because it is not attached to any storage pool.", snapshotToBeScheduled.getUuid(), - volume.getVolumeDescription())); + logger.debug("Skipping snapshot [{}] for volume [{}] because it is not attached to any storage pool.", snapshotToBeScheduled, volume); return false; } @@ -323,12 +321,13 @@ public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu } if (_snapshotPolicyDao.findById(snapshotToBeScheduled.getPolicyId()) == null) { - logger.debug(String.format("Snapshot's policy [%s] for volume [%s] has been removed; therefore, this snapshot will be removed from the snapshot scheduler.", - snapshotToBeScheduled.getPolicyId(), volume.getVolumeDescription())); + logger.debug("Snapshot's policy [{}] for volume [{}] has been removed; " + + "therefore, this snapshot will be removed from the snapshot scheduler.", + snapshotToBeScheduled.getPolicyId(), volume); _snapshotScheduleDao.remove(snapshotToBeScheduled.getId()); } - logger.debug(String.format("Snapshot [%s] for volume [%s] can be executed.", snapshotToBeScheduled.getUuid(), volume.getVolumeDescription())); + logger.debug("Snapshot [{}] for volume [{}] can be executed.", snapshotToBeScheduled, volume); return true; } @@ -336,14 +335,13 @@ public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu Account volAcct = _acctDao.findById(volume.getAccountId()); if (volAcct == null) { - logger.debug(String.format("Skipping snapshot [%s] for volume [%s] because its account [%s] has been removed.", snapshotToBeExecuted.getUuid(), - volume.getVolumeDescription(), volume.getAccountId())); + logger.debug(String.format("Skipping snapshot [%s] for volume [%s] because its account [%s] has been removed.", + 
snapshotToBeExecuted, volume, volume.getAccountId())); return true; } if (volAcct.getState() == Account.State.DISABLED) { - logger.debug(String.format("Skipping snapshot [%s] for volume [%s] because its account [%s] is disabled.", snapshotToBeExecuted.getUuid(), - volume.getVolumeDescription(), volAcct.getUuid())); + logger.debug("Skipping snapshot [{}] for volume [{}] because its account [{}] is disabled.", snapshotToBeExecuted, volume, volAcct); return true; } @@ -385,7 +383,7 @@ public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu } if (_volsDao.findById(policy.getVolumeId()) == null) { - logger.warn("Found snapshot policy ID: " + policyId + " for volume ID: " + policy.getVolumeId() + " that does not exist or has been removed"); + logger.warn("Found snapshot policy: {} for volume ID: {} that does not exist or has been removed", policy, policy.getVolumeId()); removeSchedule(policy.getVolumeId(), policy.getId()); return null; } @@ -440,7 +438,7 @@ public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu success = _snapshotScheduleDao.remove(schedule.getId()); } if (!success) { - logger.debug("Error while deleting Snapshot schedule with Id: " + schedule.getId()); + logger.debug("Error while deleting Snapshot schedule: " + schedule); } return success; } diff --git a/server/src/main/java/com/cloud/storage/upload/UploadListener.java b/server/src/main/java/com/cloud/storage/upload/UploadListener.java index 9709f5f9477..7c12387d788 100644 --- a/server/src/main/java/com/cloud/storage/upload/UploadListener.java +++ b/server/src/main/java/com/cloud/storage/upload/UploadListener.java @@ -349,7 +349,7 @@ public class UploadListener implements Listener { } public void log(String message, Level level) { - logger.log(level, message + ", " + type.toString() + " = " + typeName + " at host " + sserver.getName()); + logger.log(level, message + ", " + type.toString() + " = " + typeName + " at host " + sserver); } public void 
setDisconnected() { @@ -463,7 +463,7 @@ public class UploadListener implements Listener { } public void logDisconnect() { - logger.warn("Unable to monitor upload progress of " + typeName + " at host " + sserver.getName()); + logger.warn("Unable to monitor upload progress of {} at host {}", typeName, sserver); } public void scheduleImmediateStatusCheck(RequestType request) { diff --git a/server/src/main/java/com/cloud/storage/upload/UploadMonitorImpl.java b/server/src/main/java/com/cloud/storage/upload/UploadMonitorImpl.java index 6b503ec3a50..7962d9dced9 100644 --- a/server/src/main/java/com/cloud/storage/upload/UploadMonitorImpl.java +++ b/server/src/main/java/com/cloud/storage/upload/UploadMonitorImpl.java @@ -162,7 +162,7 @@ public class UploadMonitorImpl extends ManagerBase implements UploadMonitor { } ep.sendMessageAsync(ucmd, new UploadListener.Callback(ep.getId(), ul)); } catch (Exception e) { - logger.warn("Unable to start upload of volume " + volume.getName() + " from " + secStore.getName() + " to " + url, e); + logger.warn("Unable to start upload of volume {} from {} to {}", volume, secStore, url, e); ul.setDisconnected(); ul.scheduleStatusCheck(RequestType.GET_OR_RESTART); } @@ -199,7 +199,7 @@ public class UploadMonitorImpl extends ManagerBase implements UploadMonitor { } ep.sendMessageAsync(ucmd, new UploadListener.Callback(ep.getId(), ul)); } catch (Exception e) { - logger.warn("Unable to start upload of " + template.getUniqueName() + " from " + secStore.getName() + " to " + url, e); + logger.warn("Unable to start upload of {} from {} to {}", template, secStore, url, e); ul.setDisconnected(); ul.scheduleStatusCheck(RequestType.GET_OR_RESTART); } @@ -262,7 +262,7 @@ public class UploadMonitorImpl extends ManagerBase implements UploadMonitor { CreateEntityDownloadURLCommand cmd = new CreateEntityDownloadURLCommand(((ImageStoreEntity)store).getMountPoint(), path, uuid, null, null); Answer ans = ep.sendMessage(cmd); if (ans == null || !ans.getResult()) { 
- errorString = "Unable to create a link for " + type + " id:" + template.getId() + "," + (ans == null ? "" : ans.getDetails()); + errorString = String.format("Unable to create a link for %s [%s]: %s", type, template, ans == null ? "" : ans.getDetails()); logger.error(errorString); throw new CloudRuntimeException(errorString); } @@ -428,7 +428,7 @@ public class UploadMonitorImpl extends ManagerBase implements UploadMonitor { logger.warn("Huh? Agent id " + sserverId + " does not correspond to a row in hosts table?"); return; } - logger.debug("Handling upload sserverId " + sserverId); + logger.debug("Handling upload sserver {}", storageHost); List uploadsInProgress = new ArrayList(); uploadsInProgress.addAll(_uploadDao.listByHostAndUploadStatus(sserverId, UploadVO.Status.UPLOAD_IN_PROGRESS)); uploadsInProgress.addAll(_uploadDao.listByHostAndUploadStatus(sserverId, UploadVO.Status.COPY_IN_PROGRESS)); @@ -494,7 +494,7 @@ public class UploadMonitorImpl extends ManagerBase implements UploadMonitor { new DeleteEntityDownloadURLCommand(path, extractJob.getType(), extractJob.getUploadUrl(), ((ImageStoreVO)secStore).getParent()); EndPoint ep = _epSelector.select(secStore); if (ep == null) { - logger.warn("UploadMonitor cleanup: There is no secondary storage VM for secondary storage host " + extractJob.getDataStoreId()); + logger.warn("UploadMonitor cleanup: There is no secondary storage VM for secondary storage host {}", secStore); continue; //TODO: why continue? why not break? 
} if (logger.isDebugEnabled()) { diff --git a/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java b/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java index d9c98e2ef92..caae8f133a4 100644 --- a/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java +++ b/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java @@ -165,12 +165,13 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso protected void checkTagsDeletePermission(List tagsToDelete, Account caller) { for (ResourceTag resourceTag : tagsToDelete) { + Account owner = _accountMgr.getAccount(resourceTag.getAccountId()); if(logger.isDebugEnabled()) { - logger.debug("Resource Tag Id: " + resourceTag.getResourceId()); - logger.debug("Resource Tag AccountId: " + resourceTag.getAccountId()); + logger.debug("Resource Tag Id: {}, Uuid: {}, Type: {}, Account: {}", + resourceTag.getResourceId(), resourceTag.getResourceUuid(), + resourceTag.getResourceType(), owner); } if (caller.getAccountId() != resourceTag.getAccountId()) { - Account owner = _accountMgr.getAccount(resourceTag.getAccountId()); if(logger.isDebugEnabled()) { logger.debug("Resource Owner: " + owner); } @@ -203,8 +204,8 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso Long domainId = accountDomainPair.second(); Long accountId = accountDomainPair.first(); - resourceManagerUtil.checkResourceAccessible(accountId, domainId, "Account '" + caller + - "' doesn't have permissions to create tags" + " for resource '" + id + "(" + key + ")'."); + resourceManagerUtil.checkResourceAccessible(accountId, domainId, + String.format("Account '%s' doesn't have permissions to create tags for resource [id: %d, uuid: %s] (%s).", caller, id, resourceUuid, key)); String value = tags.get(key); @@ -216,7 +217,7 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso try { resourceTag = _resourceTagDao.persist(resourceTag); } catch 
(EntityExistsException e) { - throw new CloudRuntimeException(String.format("tag %s already on %s with id %s", resourceTag.getKey(), resourceType.toString(), resourceId),e); + throw new CloudRuntimeException(String.format("tag %s already on %s with id %s", resourceTag.getKey(), resourceType, resourceUuid),e); } resourceTags.add(resourceTag); if (ResourceObjectType.UserVm.equals(resourceType)) { @@ -319,7 +320,7 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso Long poolId = volume.getPoolId(); DataStore dataStore = retrieveDatastore(poolId); if (dataStore == null || !(dataStore.getDriver() instanceof PrimaryDataStoreDriver)) { - logger.info(String.format("No data store found for VM %d with pool ID %d.", vmId, poolId)); + logger.info("No data store found for volume {} of VM {} with pool ID {}.", volume, vmId, poolId); continue; } PrimaryDataStoreDriver dataStoreDriver = (PrimaryDataStoreDriver) dataStore.getDriver(); diff --git a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java index 026a9350f33..fbf70a8eaad 100644 --- a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java +++ b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java @@ -182,10 +182,10 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { Integer connectRequestTimeout = DirectDownloadManager.DirectDownloadConnectionRequestTimeout.value(); Integer connectTimeout = DirectDownloadManager.DirectDownloadConnectTimeout.value(); CheckUrlCommand cmd = new CheckUrlCommand(format, url, connectTimeout, connectRequestTimeout, socketTimeout, followRedirects); - logger.debug("Performing URL " + url + " validation on host " + host.getId()); + logger.debug("Performing URL {} validation on host {}", url, host); Answer answer = _agentMgr.easySend(host.getId(), cmd); if (answer == null || !answer.getResult()) { - throw new 
CloudRuntimeException("URL: " + url + " validation failed on host id " + host.getId()); + throw new CloudRuntimeException(String.format("URL: %s validation failed on host %s", url, host)); } CheckUrlAnswer ans = (CheckUrlAnswer) answer; return ans.getTemplateSize(); @@ -368,17 +368,17 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { DataCenterVO zone = _dcDao.findById(zoneId); if (zone == null) { - logger.warn(String.format("Unable to find zone by id [%s], so skip downloading template to its image store [%s].", zoneId, imageStore.getId())); + logger.warn("Unable to find zone by id [{}], so skip downloading template to its image store [{}].", zoneId, imageStore); return false; } if (Grouping.AllocationState.Disabled == zone.getAllocationState()) { - logger.info(String.format("Zone [%s] is disabled. Skip downloading template to its image store [%s].", zoneId, imageStore.getId())); + logger.info("Zone [{}] is disabled. Skip downloading template to its image store [{}].", zone, imageStore); return false; } if (!_statsCollector.imageStoreHasEnoughCapacity(imageStore)) { - logger.info(String.format("Image store doesn't have enough capacity. Skip downloading template to this image store [%s].", imageStore.getId())); + logger.info("Image store doesn't have enough capacity. 
Skip downloading template to this image store [{}].", imageStore); return false; } @@ -473,7 +473,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { // update template_store_ref and template state EndPoint ep = _epSelector.select(templateOnStore); if (ep == null) { - String errMsg = "There is no secondary storage VM for downloading template to image store " + imageStore.getName(); + String errMsg = String.format("There is no secondary storage VM for downloading template to image store %s", imageStore); logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } @@ -539,8 +539,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { if (tmpltStore != null) { physicalSize = tmpltStore.getPhysicalSize(); } else { - logger.warn("No entry found in template_store_ref for template id: " + template.getId() + " and image store id: " + ds.getId() + - " at the end of registering template!"); + logger.warn("No entry found in template_store_ref for template: {} and image store: {} at the end of registering template!", template, ds); } Scope dsScope = ds.getScope(); if (dsScope.getScopeType() == ScopeType.ZONE) { @@ -548,7 +547,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { UsageEventUtils.publishUsageEvent(etype, template.getAccountId(), dsScope.getScopeId(), template.getId(), template.getName(), null, null, physicalSize, template.getSize(), VirtualMachineTemplate.class.getName(), template.getUuid()); } else { - logger.warn("Zone scope image store " + ds.getId() + " has a null scope id"); + logger.warn("Zone scope image store {} has a null scope id", ds); } } else if (dsScope.getScopeType() == ScopeType.REGION) { // publish usage event for region-wide image store using a -1 zoneId for 4.2, need to revisit post-4.2 @@ -596,7 +595,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { if (imageStores == null || imageStores.size() == 0) { // already destroyed on image stores success = true; - 
logger.info("Unable to find image store still having template: " + template.getName() + ", so just mark the template removed"); + logger.info("Unable to find image store still having template: {}, so just mark the template removed", template); } else { // Make sure the template is downloaded to all found image stores for (DataStore store : imageStores) { @@ -605,7 +604,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { for (TemplateDataStoreVO templateStore : templateStores) { if (templateStore.getDownloadState() == Status.DOWNLOAD_IN_PROGRESS) { String errorMsg = "Please specify a template that is not currently being downloaded."; - logger.debug("Template: " + template.getName() + " is currently being downloaded to secondary storage host: " + store.getName() + "; can't delete it."); + logger.debug("Template: {} is currently being downloaded to secondary storage host: {}; can't delete it.", template, store); throw new CloudRuntimeException(errorMsg); } } @@ -629,16 +628,15 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { boolean dataDiskDeletetionResult = true; List dataDiskTemplates = templateDao.listByParentTemplatetId(template.getId()); if (dataDiskTemplates != null && dataDiskTemplates.size() > 0) { - logger.info("Template: " + template.getId() + " has Datadisk template(s) associated with it. Delete Datadisk templates before deleting the template"); + logger.info("Template: {} has Datadisk template(s) associated with it. 
Delete Datadisk templates before deleting the template", template); for (VMTemplateVO dataDiskTemplate : dataDiskTemplates) { - logger.info("Delete Datadisk template: " + dataDiskTemplate.getId() + " from image store: " + imageStore.getName()); + logger.info("Delete Datadisk template: {} from image store: {}", dataDiskTemplate, imageStore); AsyncCallFuture future = imageService.deleteTemplateAsync(imageFactory.getTemplate(dataDiskTemplate.getId(), imageStore)); try { TemplateApiResult result = future.get(); dataDiskDeletetionResult = result.isSuccess(); if (!dataDiskDeletetionResult) { - logger.warn("Failed to delete datadisk template: " + dataDiskTemplate + " from image store: " + imageStore.getName() + " due to: " - + result.getResult()); + logger.warn("Failed to delete datadisk template: {} from image store: {} due to: {}", dataDiskTemplate, imageStore, result.getResult()); break; } // Remove from template_zone_ref @@ -664,13 +662,13 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { } // remove from template_zone_ref if (dataDiskDeletetionResult) { - logger.info("Delete template: " + template.getId() + " from image store: " + imageStore.getName()); + logger.info("Delete template: {} from image store: {}", template, imageStore); AsyncCallFuture future = imageService.deleteTemplateAsync(imageFactory.getTemplate(template.getId(), imageStore)); try { TemplateApiResult result = future.get(); success = result.isSuccess(); if (!success) { - logger.warn("Failed to delete the template: " + template + " from the image store: " + imageStore.getName() + " due to: " + result.getResult()); + logger.warn("Failed to delete the template: {} from the image store: {} due to: {}", template, imageStore, result.getResult()); break; } @@ -686,8 +684,8 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { throw new CloudRuntimeException("Delete template Failed", e); } } else { - logger.warn("Template: " + template.getId() + " won't be deleted 
from image store: " + imageStore.getName() + " because deletion of one of the Datadisk" - + " templates that belonged to the template failed"); + logger.warn("Template: {} won't be deleted from image store: {} " + + "because deletion of one of the Datadisk templates that belonged to the template failed", template, imageStore); } } @@ -701,7 +699,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { // delete all cache entries for this template List cacheTmpls = imageFactory.listTemplateOnCache(template.getId()); for (TemplateInfo tmplOnCache : cacheTmpls) { - logger.info("Delete template: " + tmplOnCache.getId() + " from image cache store: " + tmplOnCache.getDataStore().getName()); + logger.info("Delete template: {} from image cache store: {}", tmplOnCache, tmplOnCache.getDataStore()); tmplOnCache.delete(); } diff --git a/server/src/main/java/com/cloud/template/TemplateAdapterBase.java b/server/src/main/java/com/cloud/template/TemplateAdapterBase.java index 119589dcc65..b5be09376fc 100644 --- a/server/src/main/java/com/cloud/template/TemplateAdapterBase.java +++ b/server/src/main/java/com/cloud/template/TemplateAdapterBase.java @@ -236,7 +236,7 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat } Account caller = CallContext.current().getCallingAccount(); if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getId())) { - throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId); + throw new PermissionDeniedException(String.format("Cannot perform this operation, Zone %s is currently disabled", zone)); } } } diff --git a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java index 41e0b6f93ff..6073b4f0bb7 100755 --- a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java +++ 
b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java @@ -525,8 +525,10 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, if (pool.getStatus() == StoragePoolStatus.Up && pool.getDataCenterId() == zoneId) { prepareTemplateInOneStoragePool(vmTemplate, pool); } else { - logger.warn("Skip loading template " + vmTemplate.getId() + " into primary storage " + pool.getId() + " as either the pool zone " - + pool.getDataCenterId() + " is different from the requested zone " + zoneId + " or the pool is currently not available."); + logger.warn("Skip loading template {} into primary storage {} as " + + "either the pool zone {} is different from the requested zone {} or " + + "the pool is currently not available.", + vmTemplate::toString, pool::toString, () -> _dcDao.findById(pool.getDataCenterId()), () -> _dcDao.findById(zoneId)); } } } else { @@ -568,7 +570,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, } if (!_accountMgr.isRootAdmin(caller.getId()) && !template.isExtractable()) { - throw new InvalidParameterValueException("Unable to extract template id=" + templateId + " as it's not extractable"); + throw new InvalidParameterValueException(String.format("Unable to extract template %s as it's not extractable", template)); } _accountMgr.checkAccess(caller, AccessType.OperateEntry, true, template); @@ -602,7 +604,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, } // Handle NFS to S3 object store migration case, we trigger template sync from NFS to S3 during extract template or copy template - _tmpltSvr.syncTemplateToRegionStore(templateId, tmpltStore); + _tmpltSvr.syncTemplateToRegionStore(template, tmpltStore); TemplateInfo templateObject = _tmplFactory.getTemplate(templateId, tmpltStore); String extractUrl = tmpltStore.createEntityExtractUrl(templateObject.getInstallPath(), template.getFormat(), templateObject); @@ -657,10 +659,10 @@ public class 
TemplateManagerImpl extends ManagerBase implements TemplateManager, } private void prepareTemplateInOneStoragePool(final VMTemplateVO template, final StoragePoolVO pool) { - logger.info("Schedule to preload template " + template.getId() + " into primary storage " + pool.getId()); + logger.info("Schedule to preload template {} into primary storage {}", template, pool); if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster) { List childDataStores = _poolDao.listChildStoragePoolsInDatastoreCluster(pool.getId()); - logger.debug("Schedule to preload template " + template.getId() + " into child datastores of DataStore cluster: " + pool.getId()); + logger.debug("Schedule to preload template {} into child datastores of DataStore cluster: {}", template, pool); for (StoragePoolVO childDataStore : childDataStores) { prepareTemplateInOneStoragePoolInternal(template, childDataStore); } @@ -681,10 +683,10 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, } private void reallyRun() { - logger.info("Start to preload template " + template.getId() + " into primary storage " + pool.getId()); + logger.info("Start to preload template {} into primary storage {}", template, pool); StoragePool pol = (StoragePool)_dataStoreMgr.getPrimaryDataStore(pool.getId()); prepareTemplateForCreate(template, pol); - logger.info("End of preloading template " + template.getId() + " into primary storage " + pool.getId()); + logger.info("End of preloading template {} into primary storage {}", template, pool); } }); } @@ -695,8 +697,9 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, if (pool.getDataCenterId() == zoneId) { prepareTemplateInOneStoragePool(template, pool); } else { - logger.info("Skip loading template " + template.getId() + " into primary storage " + pool.getId() + " as pool zone " + pool.getDataCenterId() + - " is different from the requested zone " + zoneId); + logger.info("Skip loading template {} into primary 
storage {} as pool " + + "zone {} is different from the requested zone {}", template::toString, pool::toString, + () -> _dcDao.findById(pool.getDataCenterId()), () -> _dcDao.findById(zoneId)); } } } @@ -718,7 +721,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, if (templateStoragePoolRef.getDownloadState() == Status.DOWNLOADED) { if (logger.isDebugEnabled()) { - logger.debug("Template " + templateId + " has already been downloaded to pool " + poolId); + logger.debug("Template {} has already been downloaded to pool {}", template, pool); } return templateStoragePoolRef; @@ -733,12 +736,12 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, List vos = _poolHostDao.listByHostStatus(poolId, com.cloud.host.Status.Up); if (vos == null || vos.isEmpty()) { - throw new CloudRuntimeException("Cannot download " + templateId + " to poolId " + poolId + " since there is no host in the Up state connected to this pool"); + throw new CloudRuntimeException(String.format("Cannot download %s to pool %s since there is no host in the Up state connected to this pool", template, pool)); } if (templateStoragePoolRef == null) { if (logger.isDebugEnabled()) { - logger.debug("Downloading template " + templateId + " to pool " + poolId); + logger.debug("Downloading template {} to pool {}", template, pool); } DataStore srcSecStore = _dataStoreMgr.getDataStore(templateStoreRef.getDataStoreId(), DataStoreRole.Image); TemplateInfo srcTemplate = _tmplFactory.getTemplate(templateId, srcSecStore); @@ -753,7 +756,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, return _tmpltPoolDao.findByPoolTemplate(poolId, templateId, null); } catch (Exception ex) { - logger.debug("failed to copy template from image store:" + srcSecStore.getName() + " to primary storage"); + logger.debug("failed to copy template from image store {} to primary storage", srcSecStore); } } @@ -844,7 +847,7 @@ public class 
TemplateManagerImpl extends ManagerBase implements TemplateManager, try { TemplateApiResult result = future.get(); if (result.isFailed()) { - logger.debug("copy template failed for image store " + dstSecStore.getName() + ":" + result.getResult()); + logger.debug("copy template failed for image store {}: {}", dstSecStore, result.getResult()); continue; // try next image store } @@ -859,26 +862,24 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, List dataDiskTemplates = _tmpltDao.listByParentTemplatetId(template.getId()); if (dataDiskTemplates != null && !dataDiskTemplates.isEmpty()) { for (VMTemplateVO dataDiskTemplate : dataDiskTemplates) { - logger.debug("Copying " + dataDiskTemplates.size() + " for source template " + template.getId() + ". Copy all Datadisk templates to destination datastore " + dstSecStore.getName()); + logger.debug("Copying {} for source template {}. Copy all Datadisk templates to destination datastore {}", dataDiskTemplates.size(), template, dstSecStore); TemplateInfo srcDataDiskTemplate = _tmplFactory.getTemplate(dataDiskTemplate.getId(), srcSecStore); AsyncCallFuture dataDiskCopyFuture = _tmpltSvr.copyTemplate(srcDataDiskTemplate, dstSecStore); try { TemplateApiResult dataDiskCopyResult = dataDiskCopyFuture.get(); if (dataDiskCopyResult.isFailed()) { - logger.error("Copy of datadisk template: " + srcDataDiskTemplate.getId() + " to image store: " + dstSecStore.getName() - + " failed with error: " + dataDiskCopyResult.getResult() + " , will try copying the next one"); + logger.error("Copy of datadisk template: {} to image store: {} failed with error: {} , will try copying the next one", srcDataDiskTemplate, dstSecStore, dataDiskCopyResult.getResult()); continue; // Continue to copy next Datadisk template } _tmpltDao.addTemplateToZone(dataDiskTemplate, dstZoneId); _resourceLimitMgr.incrementResourceCount(dataDiskTemplate.getAccountId(), ResourceType.secondary_storage, dataDiskTemplate.getSize()); } catch 
(Exception ex) { - logger.error("Failed to copy datadisk template: " + srcDataDiskTemplate.getId() + " to image store: " + dstSecStore.getName() - + " , will try copying the next one"); + logger.error("Failed to copy datadisk template: {} to image store: {} , will try copying the next one", srcDataDiskTemplate, dstSecStore); } } } } catch (Exception ex) { - logger.debug("failed to copy template to image store:" + dstSecStore.getName() + " ,will try next one"); + logger.debug("failed to copy template to image store:{} ,will try next one", dstSecStore); } } return true; @@ -902,7 +903,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, // Verify template is not Datadisk template if (template.getTemplateType().equals(TemplateType.DATADISK)) { - throw new InvalidParameterValueException("Template " + template.getId() + " is of type Datadisk. Cannot copy Datadisk templates."); + throw new InvalidParameterValueException(String.format("Template %s is of type Datadisk. 
Cannot copy Datadisk templates.", template)); } if (sourceZoneId != null) { @@ -933,7 +934,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, boolean success = false; if (template.getHypervisorType() == HypervisorType.BareMetal) { if (template.isCrossZones()) { - logger.debug("Template " + templateId + " is cross-zone, don't need to copy"); + logger.debug("Template {} is cross-zone, don't need to copy", template); return template; } for (Long destZoneId: destZoneIds) { @@ -952,18 +953,17 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, } if (srcSecStore == null) { - throw new InvalidParameterValueException("There is no template " + templateId + " ready on image store."); + throw new InvalidParameterValueException(String.format("There is no template %s ready on image store.", template)); } if (template.isCrossZones()) { // sync template from cache store to region store if it is not there, for cases where we are going to migrate existing NFS to S3. 
- _tmpltSvr.syncTemplateToRegionStore(templateId, srcSecStore); + _tmpltSvr.syncTemplateToRegionStore(template, srcSecStore); } for (Long destZoneId : destZoneIds) { DataStore dstSecStore = getImageStore(destZoneId, templateId); if (dstSecStore != null) { - logger.debug("There is template " + templateId + " in secondary storage " + dstSecStore.getName() + - " in zone " + destZoneId + " , don't need to copy"); + logger.debug("There is template {} in secondary storage {} in zone {} , don't need to copy", template, dstSecStore, dataCenterVOs.get(destZoneId)); continue; } if (!copy(userId, template, srcSecStore, dataCenterVOs.get(destZoneId))) { @@ -1004,7 +1004,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, _tmpltDao.addTemplateToZone(template, dstZoneId); return true; } catch (Exception ex) { - logger.debug("failed to copy template from Zone: " + sourceZone.getUuid() + " to Zone: " + dstZone.getUuid()); + logger.debug("failed to copy template from Zone: {} to Zone: {}", sourceZone, dstZone); } return false; } @@ -1055,7 +1055,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.acquireInLockTable(templatePoolVO.getId()); if (templatePoolRef == null) { - logger.debug("Can't aquire the lock for template pool ref: " + templatePoolVO.getId()); + logger.debug("Can't acquire the lock for template pool ref: {}", templatePoolVO); return; } @@ -1074,11 +1074,11 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, TemplateApiResult result = future.get(); if (result.isFailed()) { - logger.debug("Failed to delete template " + template.getId() + " from storage pool " + pool.getId()); + logger.debug("Failed to delete template {} from storage pool {}", template, pool); } else { // Remove the templatePoolVO. 
if (_tmpltPoolDao.remove(templatePoolVO.getId())) { - logger.debug("Successfully evicted template " + template.getName() + " from storage pool " + pool.getName()); + logger.debug("Successfully evicted template {} from storage pool {}", template, pool); } } } else { @@ -1088,14 +1088,14 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, if (answer != null && answer.getResult()) { // Remove the templatePoolVO. if (_tmpltPoolDao.remove(templatePoolVO.getId())) { - logger.debug("Successfully evicted template " + template.getName() + " from storage pool " + pool.getName()); + logger.debug("Successfully evicted template {} from storage pool {}", template, pool); } } else { - logger.info("Will retry evict template " + template.getName() + " from storage pool " + pool.getName()); + logger.info("Will retry evict template {} from storage pool {}", template, pool); } } } catch (StorageUnavailableException | InterruptedException | ExecutionException e) { - logger.info("Storage is unavailable currently. Will retry evicte template " + template.getName() + " from storage pool " + pool.getName()); + logger.info("Storage is unavailable currently. 
Will retry evict template {} from storage pool {}", template, pool); } finally { _tmpltPoolDao.releaseFromLockTable(templatePoolRef.getId()); } @@ -1340,7 +1340,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, vmInstanceVOList = _vmInstanceDao.listNonExpungedByTemplate(templateId); } if(!cmd.isForced() && CollectionUtils.isNotEmpty(vmInstanceVOList)) { - final String message = String.format("Unable to delete template with id: %1$s because VM instances: [%2$s] are using it.", templateId, Joiner.on(",").join(vmInstanceVOList)); + final String message = String.format("Unable to delete template: %s because VM instances: [%s] are using it.", template, Joiner.on(",").join(vmInstanceVOList)); logger.warn(message); throw new InvalidParameterValueException(message); } @@ -1488,7 +1488,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, } if (!_projectMgr.canAccessProjectAccount(caller, project.getProjectAccountId())) { - throw new InvalidParameterValueException("Account " + caller + " can't access project id=" + projectId); + throw new InvalidParameterValueException("Account " + caller + " can't access project id=" + project.getUuid()); } accountNames.add(_accountMgr.getAccount(project.getProjectAccountId()).getAccountName()); } @@ -1849,7 +1849,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, // will not be active when the private template is // created if (!_volumeMgr.volumeInactive(volume)) { - String msg = "Unable to create private template for volume: " + volume.getName() + "; volume is attached to a non-stopped VM, please stop the VM first"; + String msg = String.format("Unable to create private template for volume: %s; volume is attached to a non-stopped VM, please stop the VM first", volume); if (logger.isInfoEnabled()) { logger.info(msg); } @@ -1858,7 +1858,7 @@ hyperType = 
_volumeDao.getHypervisorType(volumeId); if (HypervisorType.LXC.equals(hyperType)) { - throw new InvalidParameterValueException("Template creation is not supported for LXC volume: " + volumeId); + throw new InvalidParameterValueException(String.format("Template creation is not supported for LXC volume: %s", volume)); } } else { // create template from snapshot snapshot = _snapshotDao.findById(snapshotId); @@ -1877,8 +1877,8 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, _accountMgr.checkAccess(caller, null, true, snapshot); if (snapshot.getState() != Snapshot.State.BackedUp) { - throw new InvalidParameterValueException("Snapshot id=" + snapshotId + " is not in " + Snapshot.State.BackedUp + - " state yet and can't be used for template creation"); + throw new InvalidParameterValueException(String.format("Snapshot %s is not in %s state yet and can't be used for template creation", + snapshot, Snapshot.State.BackedUp)); } /* @@ -2053,7 +2053,8 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, } @Override - public Long getTemplateSize(long templateId, long zoneId) { + public Long getTemplateSize(VirtualMachineTemplate template, long zoneId) { + long templateId = template.getId(); if (_tmplStoreDao.isTemplateMarkedForDirectDownload(templateId)) { // check if template is marked for direct download return _tmplStoreDao.getReadyBypassedTemplate(templateId).getSize(); @@ -2064,7 +2065,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, templateStoreRef = _tmplStoreDao.findByTemplateZoneStagingDownloadStatus(templateId, zoneId, VMTemplateStorageResourceAssoc.Status.DOWNLOADED); if (templateStoreRef == null) { - throw new CloudRuntimeException("Template " + templateId + " has not been completely downloaded to zone " + zoneId); + throw new CloudRuntimeException(String.format("Template %s has not been completely downloaded to zone %s", template, _dcDao.findById(zoneId))); } } 
return templateStoreRef.getSize(); @@ -2331,8 +2332,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, return; } if (template.isDeployAsIs()) { - String msg = String.format("Deploy-as-is template %s [%s] can not have the UEFI setting. Settings are read directly from the template", - template.getName(), template.getUuid()); + String msg = String.format("Deploy-as-is template %s can not have the UEFI setting. Settings are read directly from the template", template); throw new InvalidParameterValueException(msg); } try { @@ -2377,10 +2377,11 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, } @Override - public List getTemplateDisksOnImageStore(Long templateId, DataStoreRole role, String configurationId) { + public List getTemplateDisksOnImageStore(VirtualMachineTemplate template, DataStoreRole role, String configurationId) { + long templateId = template.getId(); TemplateInfo templateObject = _tmplFactory.getTemplate(templateId, role); if (templateObject == null) { - String msg = String.format("Could not find template %s downloaded on store with role %s", templateId, role.toString()); + String msg = String.format("Could not find template %s downloaded on store with role %s", template, role.toString()); logger.error(msg); throw new CloudRuntimeException(msg); } diff --git a/server/src/main/java/com/cloud/usage/UsageServiceImpl.java b/server/src/main/java/com/cloud/usage/UsageServiceImpl.java index 421d2587441..d64a42efbec 100644 --- a/server/src/main/java/com/cloud/usage/UsageServiceImpl.java +++ b/server/src/main/java/com/cloud/usage/UsageServiceImpl.java @@ -190,7 +190,7 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag //List records for all the accounts if the caller account is of type admin. 
//If account_id or account_name is explicitly mentioned, list records for the specified account only even if the caller is of type admin ignoreAccountId = _accountService.isRootAdmin(caller.getId()); - logger.debug("Account details not available. Using userContext accountId: " + accountId); + logger.debug("Account details not available. Using userContext account: {}", caller); } // Check if a domain admin is allowed to access the requested domain id diff --git a/server/src/main/java/com/cloud/user/AccountManagerImpl.java b/server/src/main/java/com/cloud/user/AccountManagerImpl.java index df8069fdb6d..c7a510049d6 100644 --- a/server/src/main/java/com/cloud/user/AccountManagerImpl.java +++ b/server/src/main/java/com/cloud/user/AccountManagerImpl.java @@ -41,6 +41,7 @@ import javax.crypto.spec.SecretKeySpec; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.host.dao.HostDao; import org.apache.cloudstack.acl.APIChecker; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.InfrastructureEntity; @@ -273,6 +274,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M @Inject private IPAddressDao _ipAddressDao; @Inject + private HostDao hostDao; + @Inject private VpcManager _vpcMgr; @Inject private NetworkModel _networkModel; @@ -758,7 +761,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } else if (InfrastructureEntity.class.isAssignableFrom(resourceClass)) { logger.trace("Validation of access to infrastructure entity has been disabled in CloudStack version 4.4."); } - logger.debug(String.format("Account [%s] has access to resource.", account.getUuid())); + logger.debug("Account [{}] has access to resource.", account); } @Override @@ -805,7 +808,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } }); } catch (Exception e) { - logger.error("Failed to update login attempts for user with id " + id); + 
logger.error("Failed to update login attempts for user {}", () -> _userAccountDao.findById(id)); } } @@ -837,7 +840,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M success = _accountDao.update(Long.valueOf(accountId), acctForUpdate); } else { if (logger.isInfoEnabled()) { - logger.info("Attempting to lock a non-enabled account, current state is " + account.getState() + " (accountId: " + accountId + "), locking failed."); + logger.info("Attempting to lock a non-enabled account {}, current state is {}, locking failed.", account, account.getState()); } } } else { @@ -852,7 +855,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M // delete the account record if (!_accountDao.remove(accountId)) { - logger.error("Unable to delete account " + accountId); + logger.error("Unable to delete account {}", account); return false; } @@ -860,7 +863,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M _accountDao.update(accountId, account); if (logger.isDebugEnabled()) { - logger.debug("Removed account " + accountId); + logger.debug("Removed account {}", account); } return cleanupAccount(account, callerUserId, caller); @@ -881,7 +884,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } // delete autoscaling VM groups - if (!_autoscaleMgr.deleteAutoScaleVmGroupsByAccount(accountId)) { + if (!_autoscaleMgr.deleteAutoScaleVmGroupsByAccount(account)) { accountCleanupNeeded = true; } @@ -889,7 +892,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M // delete global load balancer rules for the account. 
List gslbRules = _gslbRuleDao.listByAccount(accountId); if (gslbRules != null && !gslbRules.isEmpty()) { - _gslbService.revokeAllGslbRulesForAccount(caller, accountId); + _gslbService.revokeAllGslbRulesForAccount(caller, account); } // delete the account from project accounts @@ -904,15 +907,15 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M List groups = _vmGroupDao.listByAccountId(accountId); for (InstanceGroupVO group : groups) { if (!_vmMgr.deleteVmGroup(group.getId())) { - logger.error("Unable to delete group: " + group.getId()); + logger.error("Unable to delete group: {}", group); accountCleanupNeeded = true; } } // Delete the snapshots dir for the account. Have to do this before destroying the VMs. - boolean success = _snapMgr.deleteSnapshotDirsForAccount(accountId); + boolean success = _snapMgr.deleteSnapshotDirsForAccount(account); if (success) { - logger.debug("Successfully deleted snapshots directories for all volumes under account " + accountId + " across all zones"); + logger.debug("Successfully deleted snapshots directories for all volumes under account {} across all zones", account); } // clean up templates @@ -923,14 +926,14 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M try { allTemplatesDeleted = _tmpltMgr.delete(callerUserId, template.getId(), null); } catch (Exception e) { - logger.warn("Failed to delete template while removing account: " + template.getName() + " due to: ", e); + logger.warn("Failed to delete template {} while removing account {} due to: ", template, account, e); allTemplatesDeleted = false; } } } if (!allTemplatesDeleted) { - logger.warn("Failed to delete templates while removing account id=" + accountId); + logger.warn("Failed to delete templates while removing account {}", account); accountCleanupNeeded = true; } @@ -940,14 +943,14 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M try { 
_vmSnapshotMgr.deleteVMSnapshot(vmSnapshot.getId()); } catch (Exception e) { - logger.debug("Failed to cleanup vm snapshot " + vmSnapshot.getId() + " due to " + e.toString()); + logger.debug("Failed to cleanup vm snapshot {} due to {}", vmSnapshot, e.toString()); } } // Destroy the account's VMs List vms = _userVmDao.listByAccountId(accountId); if (logger.isDebugEnabled()) { - logger.debug("Expunging # of vms (accountId=" + accountId + "): " + vms.size()); + logger.debug("Expunging # of vms (account={}): {}", account, vms.size()); } for (UserVmVO vm : vms) { @@ -956,13 +959,13 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M _vmMgr.destroyVm(vm.getId(), false); } catch (Exception e) { e.printStackTrace(); - logger.warn("Failed destroying instance " + vm.getUuid() + " as part of account deletion."); + logger.warn("Failed destroying instance {} as part of account deletion.", vm); } } // no need to catch exception at this place as expunging vm // should pass in order to perform further cleanup if (!_vmMgr.expunge(vm)) { - logger.error("Unable to expunge vm: " + vm.getId()); + logger.error("Unable to expunge vm: {}", vm); accountCleanupNeeded = true; } } @@ -973,7 +976,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M try { volumeService.deleteVolume(volume.getId(), caller); } catch (Exception ex) { - logger.warn("Failed to cleanup volumes as a part of account id=" + accountId + " cleanup due to Exception: ", ex); + logger.warn("Failed to cleanup volumes as a part of account {} cleanup due to Exception: ", account, ex); accountCleanupNeeded = true; } } @@ -983,7 +986,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M List vpnUsers = _vpnUser.listByAccount(accountId); for (VpnUserVO vpnUser : vpnUsers) { - _remoteAccessVpnMgr.removeVpnUser(accountId, vpnUser.getUsername(), caller); + _remoteAccessVpnMgr.removeVpnUser(account, vpnUser.getUsername(), caller); } try { 
@@ -991,7 +994,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M _remoteAccessVpnMgr.destroyRemoteAccessVpnForIp(vpn.getServerAddressId(), caller, false); } } catch (ResourceUnavailableException ex) { - logger.warn("Failed to cleanup remote access vpn resources as a part of account id=" + accountId + " cleanup due to Exception: ", ex); + logger.warn("Failed to cleanup remote access vpn resources as a part of account {} cleanup due to Exception: ", account, ex); accountCleanupNeeded = true; } @@ -1003,15 +1006,15 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M // Cleanup security groups int numRemoved = _securityGroupDao.removeByAccountId(accountId); - logger.info("deleteAccount: Deleted " + numRemoved + " network groups for account " + accountId); + logger.info("deleteAccount: Deleted {} network groups for account {}", numRemoved, account); // Cleanup affinity groups int numAGRemoved = _affinityGroupDao.removeByAccountId(accountId); - logger.info("deleteAccount: Deleted " + numAGRemoved + " affinity groups for account " + accountId); + logger.info("deleteAccount: Deleted {} affinity groups for account {}", numAGRemoved, account); // Delete all the networks boolean networksDeleted = true; - logger.debug("Deleting networks for account " + account.getId()); + logger.debug("Deleting networks for account {}", account); List networks = _networkDao.listByOwner(accountId); if (networks != null) { Collections.sort(networks, new Comparator() { @@ -1031,27 +1034,27 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M ReservationContext context = new ReservationContextImpl(null, null, getActiveUser(callerUserId), caller); if (!_networkMgr.destroyNetwork(network.getId(), context, false)) { - logger.warn("Unable to destroy network " + network + " as a part of account id=" + accountId + " cleanup."); + logger.warn("Unable to destroy network {} as a part of account {} cleanup.", 
network, account); accountCleanupNeeded = true; networksDeleted = false; } else { - logger.debug("Network " + network.getId() + " successfully deleted as a part of account id=" + accountId + " cleanup."); + logger.debug("Network {} successfully deleted as a part of account {} cleanup.", network, account); } } } // Delete all VPCs boolean vpcsDeleted = true; - logger.debug("Deleting vpcs for account " + account.getId()); + logger.debug("Deleting vpcs for account {}", account); List vpcs = _vpcMgr.getVpcsForAccount(account.getId()); for (Vpc vpc : vpcs) { if (!_vpcMgr.destroyVpc(vpc, caller, callerUserId)) { - logger.warn("Unable to destroy VPC " + vpc + " as a part of account id=" + accountId + " cleanup."); + logger.warn("Unable to destroy VPC {} as a part of account {} cleanup.", vpc, account); accountCleanupNeeded = true; vpcsDeleted = false; } else { - logger.debug("VPC " + vpc.getId() + " successfully deleted as a part of account id=" + accountId + " cleanup."); + logger.debug("VPC {} successfully deleted as a part of account {} cleanup.", vpc, account); } } @@ -1059,35 +1062,35 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M // release ip addresses belonging to the account List ipsToRelease = _ipAddressDao.listByAccount(accountId); for (IpAddress ip : ipsToRelease) { - logger.debug("Releasing ip " + ip + " as a part of account id=" + accountId + " cleanup"); - if (!_ipAddrMgr.disassociatePublicIpAddress(ip.getId(), callerUserId, caller)) { - logger.warn("Failed to release ip address " + ip + " as a part of account id=" + accountId + " clenaup"); + logger.debug("Releasing ip {} as a part of account {} cleanup", ip, account); + if (!_ipAddrMgr.disassociatePublicIpAddress(ip, callerUserId, caller)) { + logger.warn("Failed to release ip address {} as a part of account {} cleanup", ip, account); accountCleanupNeeded = true; } } } // Delete Site 2 Site VPN customer gateway - logger.debug("Deleting site-to-site VPN customer 
gateways for account " + accountId); + logger.debug("Deleting site-to-site VPN customer gateways for account {}", account); if (!_vpnMgr.deleteCustomerGatewayByAccount(accountId)) { - logger.warn("Fail to delete site-to-site VPN customer gateways for account " + accountId); + logger.warn("Fail to delete site-to-site VPN customer gateways for account {}", account); } // Delete autoscale resources if any try { - _autoscaleMgr.cleanUpAutoScaleResources(accountId); + _autoscaleMgr.cleanUpAutoScaleResources(account); } catch (CloudRuntimeException ex) { - logger.warn("Failed to cleanup AutoScale resources as a part of account id=" + accountId + " cleanup due to exception:", ex); + logger.warn("Failed to cleanup AutoScale resources as a part of account {} cleanup due to exception:", account, ex); accountCleanupNeeded = true; } // release account specific Virtual vlans (belong to system Public Network) - only when networks are cleaned // up successfully if (networksDeleted) { - if (!_configMgr.releaseAccountSpecificVirtualRanges(accountId)) { + if (!_configMgr.releaseAccountSpecificVirtualRanges(account)) { accountCleanupNeeded = true; } else { - logger.debug("Account specific Virtual IP ranges " + " are successfully released as a part of account id=" + accountId + " cleanup."); + logger.debug("Account specific Virtual IP ranges are successfully released as a part of account {} cleanup.", account); } } @@ -1103,14 +1106,14 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M _dataCenterVnetDao.releaseDedicatedGuestVlans(map.getId()); } int vlansReleased = _accountGuestVlanMapDao.removeByAccountId(accountId); - logger.info("deleteAccount: Released " + vlansReleased + " dedicated guest vlan ranges from account " + accountId); + logger.info("deleteAccount: Released {} dedicated guest vlan ranges from account {}", vlansReleased, account); // release account specific acquired portable IP's. 
Since all the portable IP's must have been already // disassociated with VPC/guest network (due to deletion), so just mark portable IP as free. List ipsToRelease = _ipAddressDao.listByAccount(accountId); for (IpAddress ip : ipsToRelease) { if (ip.isPortable()) { - logger.debug("Releasing portable ip " + ip + " as a part of account id=" + accountId + " cleanup"); + logger.debug("Releasing portable ip {} as a part of account {} cleanup", ip, account); _ipAddrMgr.releasePortableIpAddress(ip.getId()); } } @@ -1118,10 +1121,10 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M // release dedication if any List dedicatedResources = _dedicatedDao.listByAccountId(accountId); if (dedicatedResources != null && !dedicatedResources.isEmpty()) { - logger.debug("Releasing dedicated resources for account " + accountId); + logger.debug("Releasing dedicated resources for account {}", account); for (DedicatedResourceVO dr : dedicatedResources) { if (!_dedicatedDao.remove(dr.getId())) { - logger.warn("Fail to release dedicated resources for account " + accountId); + logger.warn("Fail to release dedicated resources for account {}", account); } } } @@ -1156,7 +1159,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M accountCleanupNeeded = true; return true; } finally { - logger.info("Cleanup for account " + account.getId() + (accountCleanupNeeded ? " is needed." : " is not needed.")); + logger.info("Cleanup for account {} {}", account, accountCleanupNeeded ? "is needed." 
: "is not needed."); if (accountCleanupNeeded) { _accountDao.markForCleanup(accountId); } else { @@ -1211,11 +1214,11 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M try { _itMgr.advanceStop(vm.getUuid(), false); } catch (OperationTimedoutException ote) { - logger.warn("Operation for stopping vm timed out, unable to stop vm " + vm.getHostName(), ote); + logger.warn("Operation for stopping vm timed out, unable to stop vm {}", vm, ote); success = false; } } catch (AgentUnavailableException aue) { - logger.warn("Agent running on host " + vm.getHostId() + " is unavailable, unable to stop vm " + vm.getHostName(), aue); + logger.warn("Agent running on host {} is unavailable, unable to stop vm {}", () -> hostDao.findById(vm.getHostId()), vm::toString, () -> aue); success = false; } } @@ -1277,7 +1280,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M checkAccess(getCurrentCallingAccount(), domain); if (!_userAccountDao.validateUsernameInDomain(userName, domainId)) { - throw new InvalidParameterValueException("The user " + userName + " already exists in domain " + domainId); + throw new InvalidParameterValueException(String.format("The user %s already exists in domain %s", userName, domain)); } if (networkDomain != null && networkDomain.length() > 0) { @@ -1400,10 +1403,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } checkApiAccess(apiCheckers, caller, command); } catch (PermissionDeniedException pde) { - String msg = String.format("User of Account %s/%s (%s) can not create an account with access to more privileges they have themself.", - caller.getAccountName(), - caller.getDomainId(), - caller.getUuid()); + String msg = String.format("User of Account %s and domain %s can not create an account with access to more privileges they have themself.", + caller, _domainMgr.getDomain(caller.getDomainId())); logger.warn(msg); throw new PermissionDeniedException(msg,pde); 
} @@ -1458,15 +1459,15 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M Account account = _accountDao.findEnabledAccount(accountName, domainId); if (account == null || account.getType() == Account.Type.PROJECT) { - throw new InvalidParameterValueException("Unable to find account " + accountName + " in domain id=" + domainId + " to create user"); + throw new InvalidParameterValueException(String.format("Unable to find account %s in domain %s to create user", accountName, domain)); } if (account.getId() == Account.ACCOUNT_ID_SYSTEM) { - throw new PermissionDeniedException("Account id : " + account.getId() + " is a system account, can't add a user to it"); + throw new PermissionDeniedException(String.format("Account: %s is a system account, can't add a user to it", account)); } if (!_userAccountDao.validateUsernameInDomain(userName, domainId)) { - throw new CloudRuntimeException("The user " + userName + " already exists in domain " + domainId); + throw new CloudRuntimeException(String.format("The user %s already exists in domain %s", userName, domain)); } UserVO user = null; user = createUser(account.getId(), userName, password, firstName, lastName, email, timeZone, userUUID, source); @@ -1484,7 +1485,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M @ActionEvent(eventType = EventTypes.EVENT_USER_UPDATE, eventDescription = "Updating User") public UserAccount updateUser(UpdateUserCmd updateUserCmd) { UserVO user = retrieveAndValidateUser(updateUserCmd); - logger.debug("Updating user with Id: " + user.getUuid()); + logger.debug("Updating user {}", user); validateAndUpdateApiAndSecretKeyIfNeeded(updateUserCmd, user); validateAndUpdateUserApiKeyAccess(updateUserCmd, user); @@ -1526,7 +1527,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M */ public void validateUserPasswordAndUpdateIfNeeded(String newPassword, UserVO user, String currentPassword, boolean 
skipCurrentPassValidation) { if (newPassword == null) { - logger.trace("No new password to update for user: " + user.getUuid()); + logger.trace("No new password to update for user: {}", user); return; } if (StringUtils.isBlank(newPassword)) { @@ -1535,7 +1536,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M User.Source userSource = user.getSource(); if (userSource == User.Source.SAML2 || userSource == User.Source.SAML2DISABLED || userSource == User.Source.LDAP) { - logger.warn(String.format("Unable to update the password for user [%d], as its source is [%s].", user.getId(), user.getSource().toString())); + logger.warn("Unable to update the password for user [{}], as its source is [{}].", user, user.getSource().toString()); throw new InvalidParameterValueException("CloudStack does not support updating passwords for SAML or LDAP users. Please contact your cloud administrator for assistance."); } @@ -1547,7 +1548,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M boolean isAdmin = isDomainAdmin || isRootAdminExecutingPasswordUpdate; boolean skipValidation = isAdmin || skipCurrentPassValidation; if (isAdmin) { - logger.trace(String.format("Admin account [uuid=%s] executing password update for user [%s] ", callingAccount.getUuid(), user.getUuid())); + logger.trace("Admin account [{}] executing password update for user [{}] ", callingAccount, user); } if (!skipValidation && StringUtils.isBlank(currentPassword)) { throw new InvalidParameterValueException("To set a new password the current password must be provided."); @@ -1577,7 +1578,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M continue; } if (BooleanUtils.toBoolean(authenticationResult.first())) { - logger.debug(String.format("User [id=%s] re-authenticated [authenticator=%s] during password update.", user.getUuid(), userAuthenticator.getName())); + logger.debug("User [{}] re-authenticated [authenticator={}] 
during password update.", user, userAuthenticator.getName()); currentPasswordMatchesDataBasePassword = true; break; } @@ -1611,7 +1612,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M Account duplicatedUserAccountWithUserThatHasTheSameUserName = _accountDao.findById(duplicatedUser.getAccountId()); if (duplicatedUserAccountWithUserThatHasTheSameUserName.getDomainId() == account.getDomainId()) { DomainVO domain = _domainDao.findById(duplicatedUserAccountWithUserThatHasTheSameUserName.getDomainId()); - throw new InvalidParameterValueException(String.format("Username [%s] already exists in domain [id=%s,name=%s]", duplicatedUser.getUsername(), domain.getUuid(), domain.getName())); + throw new InvalidParameterValueException(String.format("Username (%s) already exists in domain (%s)", duplicatedUser, domain)); } } user.setUsername(userName); @@ -1668,10 +1669,10 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M throw new CloudRuntimeException("Unable to find user account with ID: " + user.getAccountId()); } if (account.getType() == Account.Type.PROJECT) { - throw new InvalidParameterValueException("Unable to find user with ID: " + user.getUuid()); + throw new InvalidParameterValueException(String.format("Unable to find user with: %s", user)); } if (account.getId() == Account.ACCOUNT_ID_SYSTEM) { - throw new PermissionDeniedException("user UUID : " + user.getUuid() + " is a system account; update is not allowed."); + throw new PermissionDeniedException(String.format("user: %s is a system account; update is not allowed.", user)); } checkAccess(getCurrentCallingAccount(), AccessType.OperateEntry, true, account); return account; @@ -1778,12 +1779,12 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M // don't allow disabling user belonging to project's account if (account.getType() == Account.Type.PROJECT) { - throw new InvalidParameterValueException("Unable to find active 
user by id " + userId); + throw new InvalidParameterValueException(String.format("Unable to find active user %s", user)); } // If the user is a System user, return an error if (account.getId() == Account.ACCOUNT_ID_SYSTEM) { - throw new InvalidParameterValueException("User id : " + userId + " is a system user, disabling is not allowed"); + throw new InvalidParameterValueException(String.format("User: %s is a system user, disabling is not allowed", user)); } checkAccess(caller, AccessType.OperateEntry, true, account); @@ -1796,7 +1797,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M // user successfully disabled return _userAccountDao.findById(userId); } else { - throw new CloudRuntimeException("Unable to disable user " + userId); + throw new CloudRuntimeException(String.format("Unable to disable user %s", user)); } } @@ -1819,12 +1820,12 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } if (account.getType() == Account.Type.PROJECT) { - throw new InvalidParameterValueException("Unable to find active user by id " + userId); + throw new InvalidParameterValueException(String.format("Unable to find active user %s", user)); } // If the user is a System user, return an error if (account.getId() == Account.ACCOUNT_ID_SYSTEM) { - throw new InvalidParameterValueException("User id : " + userId + " is a system user, enabling is not allowed"); + throw new InvalidParameterValueException(String.format("User: %s is a system user, enabling is not allowed", user)); } checkAccess(caller, AccessType.OperateEntry, true, account); @@ -1849,7 +1850,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M return _userAccountDao.findById(userId); } else { - throw new CloudRuntimeException("Unable to enable user " + userId); + throw new CloudRuntimeException(String.format("Unable to enable user %s", user)); } } @@ -1876,7 +1877,7 @@ public class AccountManagerImpl extends ManagerBase 
implements AccountManager, M // If the user is a System user, return an error. We do not allow this if (account.getId() == Account.ACCOUNT_ID_SYSTEM) { - throw new PermissionDeniedException("user id : " + userId + " is a system user, locking is not allowed"); + throw new PermissionDeniedException(String.format("user: %s is a system user, locking is not allowed", user)); } checkAccess(caller, AccessType.OperateEntry, true, account); @@ -1905,7 +1906,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } } else { if (logger.isInfoEnabled()) { - logger.info("Attempting to lock a non-enabled user, current state is " + user.getState() + " (userId: " + user.getId() + "), locking failed."); + logger.info("Attempting to lock a non-enabled user {}, current state is {}, locking failed.", user, user.getState()); } success = false; } @@ -1916,7 +1917,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M return _userAccountDao.findById(userId); } else { - throw new CloudRuntimeException("Unable to lock user " + userId); + throw new CloudRuntimeException(String.format("Unable to lock user %s", user)); } } @@ -1950,7 +1951,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M projectIds.append(projectId).append(", "); } - throw new InvalidParameterValueException("The account id=" + accountId + " manages project(s) with ids " + projectIds + "and can't be removed"); + throw new InvalidParameterValueException(String.format("The account %s with id %d manages project(s) with ids %s and can't be removed", account, accountId, projectIds)); } CallContext.current().putContextParameter(Account.class, account.getUuid()); @@ -1964,7 +1965,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M return false; } if (account.getRemoved() != null) { - logger.info("The account:" + account.getAccountName() + " is already removed"); + logger.info("The account:{} is already 
removed", account); return false; } // don't allow removing Project account @@ -1994,11 +1995,11 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } if (account == null || account.getType() == Account.Type.PROJECT) { - throw new InvalidParameterValueException("Unable to find account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); + throw new InvalidParameterValueException(String.format("Unable to find account by accountId: %d OR by name: %s in domain %s", accountId, accountName, _domainMgr.getDomain(domainId))); } if (account.getId() == Account.ACCOUNT_ID_SYSTEM) { - throw new PermissionDeniedException("Account id : " + accountId + " is a system account, enable is not allowed"); + throw new PermissionDeniedException(String.format("Account: %s is a system account, enable is not allowed", account)); } // Check if user performing the action is allowed to modify this account @@ -2012,7 +2013,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M return _accountDao.findById(account.getId()); } else { - throw new CloudRuntimeException("Unable to enable account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); + throw new CloudRuntimeException(String.format("Unable to enable account %s in domain %s", account, _domainMgr.getDomain(domainId))); } } @@ -2033,7 +2034,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } if (account.getId() == Account.ACCOUNT_ID_SYSTEM) { - throw new PermissionDeniedException("Account id : " + accountId + " is a system account, lock is not allowed"); + throw new PermissionDeniedException(String.format("Account: %s is a system account, lock is not allowed", account)); } checkAccess(caller, AccessType.OperateEntry, true, account); @@ -2042,7 +2043,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M 
CallContext.current().putContextParameter(Account.class, account.getUuid()); return _accountDao.findById(account.getId()); } else { - throw new CloudRuntimeException("Unable to lock account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); + throw new CloudRuntimeException(String.format("Unable to lock account %s by accountId: %d OR by name: %s in domain %s", account, accountId, accountName, _domainMgr.getDomain(domainId))); } } @@ -2059,11 +2060,11 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } if (account == null || account.getType() == Account.Type.PROJECT) { - throw new InvalidParameterValueException("Unable to find account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); + throw new InvalidParameterValueException(String.format("Unable to find account by accountId: %d OR by name: %s in domain %s", accountId, accountName, _domainMgr.getDomain(domainId))); } if (account.getId() == Account.ACCOUNT_ID_SYSTEM) { - throw new PermissionDeniedException("Account id : " + accountId + " is a system account, disable is not allowed"); + throw new PermissionDeniedException(String.format("Account: %s is a system account, disable is not allowed", account)); } checkAccess(caller, AccessType.OperateEntry, true, account); @@ -2072,7 +2073,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M CallContext.current().putContextParameter(Account.class, account.getUuid()); return _accountDao.findById(account.getId()); } else { - throw new CloudRuntimeException("Unable to update account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); + throw new CloudRuntimeException(String.format("Unable to update account %s by accountId: %d OR by name: %s in domain %s", account, accountId, accountName, _domainMgr.getDomain(domainId))); } } @@ -2100,8 +2101,8 @@ public class AccountManagerImpl extends ManagerBase 
implements AccountManager, M // Check if account exists if (account == null || account.getType() == Account.Type.PROJECT) { - logger.error("Unable to find account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); - throw new InvalidParameterValueException("Unable to find account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); + logger.error("Unable to find account by accountId: {} OR by name: {} in domain {}", accountId, accountName, _domainMgr.getDomain(domainId)); + throw new InvalidParameterValueException(String.format("Unable to find account by accountId: %d OR by name: %s in domain %s", accountId, accountName, _domainMgr.getDomain(domainId))); } // Don't allow to modify system account @@ -2118,16 +2119,17 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M if(newAccountName != null) { if (newAccountName.isEmpty()) { - throw new InvalidParameterValueException("The new account name for account '" + account.getUuid() + "' " + - "within domain '" + domainId + "' is empty string. Account will be not renamed."); + throw new InvalidParameterValueException(String.format("The new account name for " + + "account '%s' within domain '%s' is empty string. 
Account will be not renamed.", + account, _domainMgr.getDomain(domainId))); } // check if the new proposed account name is absent in the domain Account existingAccount = _accountDao.findActiveAccount(newAccountName, domainId); if (existingAccount != null && existingAccount.getId() != account.getId()) { - throw new InvalidParameterValueException("The account with the proposed name '" + - newAccountName + "' exists in the domain '" + - domainId + "' with existing account id '" + existingAccount.getId() + "'"); + throw new InvalidParameterValueException(String.format("The account with the " + + "proposed name '%s' exists in the domain '%s' with existing account %s", + newAccountName, _domainMgr.getDomain(domainId), existingAccount)); } acctForUpdate.setAccountName(newAccountName); @@ -2147,9 +2149,9 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M final List roles = cmd.roleService.listRoles(); final boolean roleNotFound = roles.stream().filter(r -> r.getId() == roleId).count() == 0; if (roleNotFound) { - throw new InvalidParameterValueException("Role with ID '" + roleId.toString() + "' " + - "is not found or not available for the account '" + account.getUuid() + "' " + - "in the domain '" + domainId + "'."); + throw new InvalidParameterValueException(String.format("Role with ID '%s' is not " + + "found or not available for the account '%s' in the domain '%s'.", + roleId.toString(), account, _domainMgr.getDomain(domainId))); } Role role = roleService.findRole(roleId); @@ -2182,7 +2184,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M CallContext.current().putContextParameter(Account.class, account.getUuid()); return _accountDao.findById(account.getId()); } else { - throw new CloudRuntimeException("Unable to update account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); + throw new CloudRuntimeException(String.format("Unable to update account %s by accountId: 
%d OR by name: %s in domain %d", account, accountId, accountName, domainId)); } } @@ -2322,7 +2324,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M List removedAccounts = _accountDao.findCleanupsForRemovedAccounts(null); logger.info("Found " + removedAccounts.size() + " removed accounts to cleanup"); for (AccountVO account : removedAccounts) { - logger.debug("Cleaning up " + account.getId()); + logger.debug("Cleaning up {}", account); cleanupAccount(account, getSystemUser().getId(), getSystemAccount()); } @@ -2330,11 +2332,11 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M List disabledAccounts = _accountDao.findCleanupsForDisabledAccounts(); logger.info("Found " + disabledAccounts.size() + " disabled accounts to cleanup"); for (AccountVO account : disabledAccounts) { - logger.debug("Disabling account " + account.getId()); + logger.debug("Disabling account {}", account); try { disableAccount(account.getId()); } catch (Exception e) { - logger.error("Skipping due to error on account " + account.getId(), e); + logger.error("Skipping due to error on account {}", account, e); } } @@ -2349,20 +2351,20 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M // release dedication if any, before deleting the domain List dedicatedResources = _dedicatedDao.listByDomainId(domainId); if (dedicatedResources != null && !dedicatedResources.isEmpty()) { - logger.debug("Releasing dedicated resources for domain" + domainId); + logger.debug("Releasing dedicated resources for domain {}", inactiveDomain); for (DedicatedResourceVO dr : dedicatedResources) { if (!_dedicatedDao.remove(dr.getId())) { - logger.warn("Fail to release dedicated resources for domain " + domainId); + logger.warn("Fail to release dedicated resources for domain {}", inactiveDomain); } } } - logger.debug("Removing inactive domain id=" + domainId); + logger.debug("Removing inactive domain {}", inactiveDomain); 
_domainMgr.removeDomain(domainId); } else { - logger.debug("Can't remove inactive domain id=" + domainId + " as it has accounts that need cleanup"); + logger.debug("Can't remove inactive domain {} as it has accounts that need cleanup", inactiveDomain); } } catch (Exception e) { - logger.error("Skipping due to error on domain " + domainId, e); + logger.error("Skipping due to error on domain {}", inactiveDomain, e); } } @@ -2373,10 +2375,10 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M try { Account projectAccount = getAccount(project.getProjectAccountId()); if (projectAccount == null) { - logger.debug("Removing inactive project id=" + project.getId()); + logger.debug("Removing inactive project {}", project); _projectMgr.deleteProject(CallContext.current().getCallingAccount(), CallContext.current().getCallingUserId(), project); } else { - logger.debug("Can't remove disabled project " + project + " as it has non removed account id=" + project.getId()); + logger.debug("Can't remove disabled project {} as it has non removed account {}", project, projectAccount); } } catch (Exception e) { logger.error("Skipping due to error on project " + project, e); @@ -2427,7 +2429,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M Account owner = _accountDao.findActiveAccount(accountName, domainId); if (owner == null) { - throw new InvalidParameterValueException("Unable to find account " + accountName + " in domain " + domainId); + throw new InvalidParameterValueException(String.format("Unable to find account %s in domain %s", accountName, domain)); } checkAccess(caller, domain); @@ -2524,8 +2526,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } if ((domainId != Domain.ROOT_DOMAIN) && (accountType == Account.Type.ADMIN)) { - throw new InvalidParameterValueException( - "Invalid account type " + accountType + " given for an account in domain " + domainId + "; unable to create 
user of admin role type in non-ROOT domain."); + throw new InvalidParameterValueException(String.format("Invalid account type %s given for " + + "an account in domain %s; unable to create user of admin role type in non-ROOT domain.", accountType, domain)); } // Validate account/user/domain settings @@ -2545,7 +2547,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M if (accountType == Account.Type.RESOURCE_DOMAIN_ADMIN) { List dc = _dcDao.findZonesByDomainId(domainId); if (dc.isEmpty()) { - throw new InvalidParameterValueException("The account cannot be created as domain " + domain.getName() + " is not associated with any private Zone"); + throw new InvalidParameterValueException(String.format("The account cannot be created as domain %s is not associated with any private Zone", domain)); } } @@ -2556,7 +2558,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M AccountVO account = _accountDao.persist(new AccountVO(accountName, domainId, networkDomain, accountType, roleId, uuid)); if (account == null) { - throw new CloudRuntimeException("Failed to create account name " + accountName + " in domain id=" + domainId); + throw new CloudRuntimeException(String.format("Failed to create account name %s in domain id=%s", accountName, _domainMgr.getDomain(domainId))); } Long accountId = account.getId(); @@ -2806,9 +2808,9 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M if (!userAccount.getState().equalsIgnoreCase(Account.State.ENABLED.toString()) || !userAccount.getAccountState().equalsIgnoreCase(Account.State.ENABLED.toString())) { if (logger.isInfoEnabled()) { - logger.info("User " + username + " in domain " + domainName + " is disabled/locked (or account is disabled/locked)"); + logger.info("User {} in domain {} is disabled/locked (or account is disabled/locked)", userAccount, domain); } - throw new CloudAuthenticationException("User " + username + " (or their account) in 
domain " + domainName + " is disabled/locked. Please contact the administrator."); + throw new CloudAuthenticationException(String.format("User %s (or their account) in domain %s is disabled/locked. Please contact the administrator.", userAccount, domain)); } // Whenever the user is able to log in successfully, reset the login attempts to zero if (!isInternalAccount(userAccount.getId())) { @@ -2850,8 +2852,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M (allowedLoginAttempts - attemptsMade) + " attempt(s) remaining"); } else { updateLoginAttempts(account.getId(), allowedLoginAttempts, true); - logger.warn("User " + account.getUsername() + - " has been disabled due to multiple failed login attempts." + " Please contact admin."); + logger.warn("User {} has been disabled due to multiple failed login attempts. Please contact admin.", account); } } @@ -2944,11 +2945,11 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M // don't allow updating system user if (user.getId() == User.UID_SYSTEM) { - throw new PermissionDeniedException("user id : " + user.getId() + " is system account, update is not allowed"); + throw new PermissionDeniedException(String.format("user: %s is system account, update is not allowed", user)); } // don't allow baremetal system user if (BaremetalUtils.BAREMETAL_SYSTEM_ACCOUNT_NAME.equals(user.getUsername())) { - throw new PermissionDeniedException("user id : " + user.getId() + " is system account, update is not allowed"); + throw new PermissionDeniedException(String.format("user: %s is system account, update is not allowed", user)); } // generate both an api key and a secret key, update the user table with the keys, return the keys to the user @@ -3009,7 +3010,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M _userDao.update(userId, updatedUser); return encodedKey; } catch (NoSuchAlgorithmException ex) { - logger.error("error generating secret 
key for user id=" + userId, ex); + logger.error("error generating secret key for user {}", _userAccountDao.findById(userId), ex); } return null; } @@ -3036,7 +3037,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M _userDao.update(userId, updatedUser); return encodedKey; } catch (NoSuchAlgorithmException ex) { - logger.error("error generating secret key for user id=" + userId, ex); + logger.error("error generating secret key for user {}", _userAccountDao.findById(userId), ex); } return null; } @@ -3142,7 +3143,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M // check permissions permittedAccounts.add(userAccount.getId()); } else { - throw new InvalidParameterValueException("could not find account " + accountName + " in domain " + domain.getUuid()); + throw new InvalidParameterValueException("could not find account " + accountName + " in domain " + domain); } } @@ -3277,7 +3278,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M if (!enabledOnly || account.getState() == Account.State.ENABLED) { return account.getId(); } else { - throw new PermissionDeniedException("Can't add resources to the account id=" + account.getId() + " in state=" + account.getState() + " as it's no longer active"); + throw new PermissionDeniedException(String.format("Can't add resources to the account %s in state=%s as it's no longer active", account, account.getState())); } } else { // idList is not used anywhere, so removed it now diff --git a/server/src/main/java/com/cloud/user/DomainManagerImpl.java b/server/src/main/java/com/cloud/user/DomainManagerImpl.java index 4a81772d6d7..6fc9c6f5ef5 100644 --- a/server/src/main/java/com/cloud/user/DomainManagerImpl.java +++ b/server/src/main/java/com/cloud/user/DomainManagerImpl.java @@ -362,7 +362,7 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom try { // mark domain as inactive - logger.debug("Marking 
domain id=" + domain.getId() + " as " + Domain.State.Inactive + " before actually deleting it"); + logger.debug("Marking domain {} as {} before actually deleting it", domain, Domain.State.Inactive); domain.setState(Domain.State.Inactive); _domainDao.update(domain.getId(), domain); @@ -402,12 +402,12 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom // remove dedicated BGP peers routedIpv4Manager.removeBgpPeersByDomainId(domain.getId()); - if (!_configMgr.releaseDomainSpecificVirtualRanges(domain.getId())) { + if (!_configMgr.releaseDomainSpecificVirtualRanges(domain)) { CloudRuntimeException e = new CloudRuntimeException("Can't delete the domain yet because failed to release domain specific virtual ip ranges"); e.addProxyObject(domain.getUuid(), "domainId"); throw e; } else { - logger.debug("Domain specific Virtual IP ranges " + " are successfully released as a part of domain id=" + domain.getId() + " cleanup."); + logger.debug("Domain specific Virtual IP ranges are successfully released as a part of domain {} cleanup.", domain); } cleanupDomainDetails(domain.getId()); @@ -416,7 +416,7 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom CallContext.current().putContextParameter(Domain.class, domain.getUuid()); return true; } catch (Exception ex) { - logger.error("Exception deleting domain with id " + domain.getId(), ex); + logger.error("Exception deleting domain {}", domain, ex); if (ex instanceof CloudRuntimeException) { rollbackDomainState(domain); throw (CloudRuntimeException)ex; @@ -431,8 +431,7 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom * @param domain domain */ protected void rollbackDomainState(DomainVO domain) { - logger.debug("Changing domain id=" + domain.getId() + " state back to " + Domain.State.Active + - " because it can't be removed due to resources referencing to it"); + logger.debug("Changing domain {} state back to {} because it can't be 
removed due to resources referencing to it", domain, Domain.State.Active); domain.setState(Domain.State.Active); _domainDao.update(domain.getId(), domain); } @@ -448,8 +447,7 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom protected void tryCleanupDomain(DomainVO domain, long ownerId) throws ConcurrentOperationException, ResourceUnavailableException, CloudRuntimeException { if (!cleanupDomain(domain.getId(), ownerId)) { CloudRuntimeException e = - new CloudRuntimeException("Failed to clean up domain resources and sub domains, delete failed on domain " + domain.getName() + " (id: " + - domain.getId() + ")."); + new CloudRuntimeException(String.format("Failed to clean up domain resources and sub domains, delete failed on domain %s", domain)); e.addProxyObject(domain.getUuid(), "domainId"); throw e; } @@ -472,7 +470,7 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom List accountsForCleanup = _accountDao.findCleanupsForRemovedAccounts(domain.getId()); List dedicatedResources = _dedicatedDao.listByDomainId(domain.getId()); if (CollectionUtils.isNotEmpty(dedicatedResources)) { - logger.error("There are dedicated resources for the domain " + domain.getId()); + logger.error("There are dedicated resources for the domain {}", domain); hasDedicatedResources = true; } if (accountsForCleanup.isEmpty() && networkIds.isEmpty() && !hasDedicatedResources) { @@ -514,8 +512,7 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom _messageBus.publish(_name, MESSAGE_PRE_REMOVE_DOMAIN_EVENT, PublishScope.LOCAL, domain); if (!_domainDao.remove(domain.getId())) { CloudRuntimeException e = - new CloudRuntimeException("Delete failed on domain " + domain.getName() + " (id: " + domain.getId() + - "); Please make sure all users and sub domains have been removed from the domain before deleting"); + new CloudRuntimeException(String.format("Delete failed on domain %s; Please make sure all users and 
sub domains have been removed from the domain before deleting", domain)); e.addProxyObject(domain.getUuid(), "domainId"); throw e; } @@ -604,9 +601,9 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom } protected boolean cleanupDomain(Long domainId, Long ownerId) throws ConcurrentOperationException, ResourceUnavailableException { - logger.debug("Cleaning up domain id=" + domainId); boolean success = true; DomainVO domainHandle = _domainDao.findById(domainId); + logger.debug("Cleaning up domain {}", domainHandle); { domainHandle.setState(Domain.State.Inactive); _domainDao.update(domainId, domainHandle); @@ -629,7 +626,7 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom for (DomainVO domain : domains) { success = (success && cleanupDomain(domain.getId(), domain.getAccountId())); if (!success) { - logger.warn("Failed to cleanup domain id=" + domain.getId()); + logger.warn("Failed to cleanup domain {}", domain); } } } @@ -640,15 +637,15 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom List accounts = _accountDao.search(sc, null); for (AccountVO account : accounts) { if (account.getType() != Account.Type.PROJECT) { - logger.debug("Deleting account " + account + " as a part of domain id=" + domainId + " cleanup"); + logger.debug("Deleting account {} as a part of domain {} cleanup", account, domainHandle); boolean deleteAccount = _accountMgr.deleteAccount(account, CallContext.current().getCallingUserId(), getCaller()); if (!deleteAccount) { - logger.warn("Failed to cleanup account id=" + account.getId() + " as a part of domain cleanup"); + logger.warn("Failed to cleanup account {} as a part of domain cleanup", account); } success = (success && deleteAccount); } else { ProjectVO project = _projectDao.findByProjectAccountId(account.getId()); - logger.debug("Deleting project " + project + " as a part of domain id=" + domainId + " cleanup"); + logger.debug("Deleting project 
{} as a part of domain {} cleanup", project, domainHandle); boolean deleteProject = _projectMgr.deleteProject(getCaller(), CallContext.current().getCallingUserId(), project); if (!deleteProject) { logger.warn("Failed to cleanup project " + project + " as a part of domain cleanup"); @@ -659,23 +656,23 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom //delete the domain shared networks boolean networksDeleted = true; - logger.debug("Deleting networks for domain id=" + domainId); + logger.debug("Deleting networks for domain {}", domainHandle); List networkIds = _networkDomainDao.listNetworkIdsByDomain(domainId); CallContext ctx = CallContext.current(); ReservationContext context = new ReservationContextImpl(null, null, _accountMgr.getActiveUser(ctx.getCallingUserId()), ctx.getCallingAccount()); for (Long networkId : networkIds) { - logger.debug("Deleting network id=" + networkId + " as a part of domain id=" + domainId + " cleanup"); + logger.debug("Deleting network id={} as a part of domain {} cleanup", networkId, domainHandle); if (!_networkMgr.destroyNetwork(networkId, context, false)) { - logger.warn("Unable to destroy network id=" + networkId + " as a part of domain id=" + domainId + " cleanup."); + logger.warn("Unable to destroy network id={} as a part of domain {} cleanup.", networkId, domainHandle); networksDeleted = false; } else { - logger.debug("Network " + networkId + " successfully deleted as a part of domain id=" + domainId + " cleanup."); + logger.debug("Network {} successfully deleted as a part of domain {} cleanup.", networkId, domainHandle); } } //don't proceed if networks failed to cleanup. 
The cleanup will be performed for inactive domain once again if (!networksDeleted) { - logger.debug("Failed to delete the shared networks as a part of domain id=" + domainId + " clenaup"); + logger.debug("Failed to delete the shared networks as a part of domain {} cleanup", domainHandle); return false; } @@ -686,10 +683,10 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom //release dedication if any, before deleting the domain List dedicatedResources = _dedicatedDao.listByDomainId(domainId); if (dedicatedResources != null && !dedicatedResources.isEmpty()) { - logger.debug("Releasing dedicated resources for domain" + domainId); + logger.debug("Releasing dedicated resources for domain {}", domainHandle); for (DedicatedResourceVO dr : dedicatedResources) { if (!_dedicatedDao.remove(dr.getId())) { - logger.warn("Fail to release dedicated resources for domain " + domainId); + logger.warn("Fail to release dedicated resources for domain {}", domainHandle); return false; } } @@ -1005,8 +1002,8 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom if (currentDomainResourceCount + newParentDomainResourceCount > newParentDomainResourceLimit) { String message = String.format("Cannot move domain [%s] to parent domain [%s] as maximum domain resource limit of type [%s] would be exceeded. 
The current resource " - + "count for domain [%s] is [%s], the resource count for the new parent domain [%s] is [%s], and the limit is [%s].", domainToBeMoved.getUuid(), - newParentDomain.getUuid(), resourceType, domainToBeMoved.getUuid(), currentDomainResourceCount, newParentDomain.getUuid(), newParentDomainResourceCount, + + "count for domain [%s] is [%s], the resource count for the new parent domain [%s] is [%s], and the limit is [%s].", domainToBeMoved, + newParentDomain, resourceType, domainToBeMoved, currentDomainResourceCount, newParentDomain, newParentDomainResourceCount, newParentDomainResourceLimit); logger.error(message); throw new ResourceAllocationException(message, resourceType); diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index ce2e5585a09..f33de020446 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -745,12 +745,14 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir long nicId; long vmId; String vmName; + String vmUuid; boolean isWindows; Long hostId; String networkCidr; - public VmIpAddrFetchThread(long vmId, long nicId, String instanceName, boolean windows, Long hostId, String networkCidr) { + public VmIpAddrFetchThread(long vmId, String vmUuid, long nicId, String instanceName, boolean windows, Long hostId, String networkCidr) { this.vmId = vmId; + this.vmUuid = vmUuid; this.nicId = nicId; this.vmName = instanceName; this.isWindows = windows; @@ -763,10 +765,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir GetVmIpAddressCommand cmd = new GetVmIpAddressCommand(vmName, networkCidr, isWindows); boolean decrementCount = true; + NicVO nic = _nicDao.findById(nicId); try { - logger.debug("Trying IP retrieval for VM {} ({}), nic Id {}", vmName, vmId, nicId); + logger.debug("Trying IP retrieval for VM [id: {}, uuid: 
{}, name: {}], nic {}", vmId, vmUuid, vmName, nic); Answer answer = _agentMgr.send(hostId, cmd); - NicVO nic = _nicDao.findById(nicId); if (answer.getResult()) { String vmIp = answer.getDetails(); @@ -775,12 +777,13 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (nic != null) { nic.setIPv4Address(vmIp); _nicDao.update(nicId, nic); - logger.debug("VM {} ({}) - IP {} retrieved successfully", vmName, vmId, vmIp); + logger.debug("Vm [id: {}, uuid: {}, name: {}] - IP {} retrieved successfully", vmId, vmUuid, vmName, vmIp); vmIdCountMap.remove(nicId); decrementCount = false; ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, Domain.ROOT_DOMAIN, EventTypes.EVENT_NETWORK_EXTERNAL_DHCP_VM_IPFETCH, - "VM " + vmId + ", nic id " + nicId + ", IP address " + vmIp + " fetched successfully", vmId, ApiCommandResourceType.VirtualMachine.toString()); + String.format("VM [id: %d, uuid: %s, name: %s], nic %s, IP address %s got fetched successfully", + vmId, vmUuid, vmName, nic, vmIp), vmId, ApiCommandResourceType.VirtualMachine.toString()); } } } else { @@ -791,7 +794,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir _nicDao.update(nicId, nic); } if (answer.getDetails() != null) { - logger.debug("Failed to get vm ip for Vm {} ({}), details: {}", vmName, vmId, answer.getDetails()); + logger.debug("Failed to get vm ip for Vm [id: {}, uuid: {}, name: {}], details: {}", + vmId, vmUuid, vmName, answer.getDetails()); } } } catch (OperationTimedoutException e) { @@ -802,7 +806,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (decrementCount) { VmAndCountDetails vmAndCount = vmIdCountMap.get(nicId); vmAndCount.decrementCount(); - logger.debug("Ip is not retrieved for VM {} ({}) nic {} ... decremented count to {}", vmName, vmId, nicId, vmAndCount.getRetrievalCount()); + logger.debug("Ip is not retrieved for VM [id: {}, uuid: {}, name: {}] nic {} ... 
decremented count to {}", + vmId, vmUuid, vmName, nic, vmAndCount.getRetrievalCount()); vmIdCountMap.put(nicId, vmAndCount); } } @@ -843,13 +848,13 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (userVm.getState() == State.Error || userVm.getState() == State.Expunging) { - logger.error("vm is not in the right state: " + vmId); - throw new InvalidParameterValueException("Vm with id " + vmId + " is not in the right state"); + logger.error("vm is not in the right state: {}", userVm); + throw new InvalidParameterValueException(String.format("Vm %s is not in the right state", userVm)); } if (userVm.getState() != State.Stopped) { - logger.error("vm is not in the right state: " + vmId); - throw new InvalidParameterValueException("Vm " + userVm + " should be stopped to do password reset"); + logger.error("vm is not in the right state: {}", userVm); + throw new InvalidParameterValueException(String.format("Vm %s should be stopped to do password reset", userVm)); } _accountMgr.checkAccess(caller, null, true, userVm); @@ -949,7 +954,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // Do parameters input validation if (userVm.getState() != State.Stopped) { - logger.error("vm is not in the right state: " + vmId); + logger.error("vm ({}) should be stopped to do UserData reset. 
current state: {}", userVm, userVm.getState()); throw new InvalidParameterValueException(String.format("VM %s should be stopped to do UserData reset", userVm)); } @@ -995,11 +1000,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // Do parameters input validation if (userVm.getState() == State.Error || userVm.getState() == State.Expunging) { - logger.error("vm is not in the right state: " + vmId); + logger.error("vm ({}) is not in the right state: {}", userVm, userVm.getState()); throw new InvalidParameterValueException("Vm with specified id is not in the right state"); } if (userVm.getState() != State.Stopped) { - logger.error("vm is not in the right state: " + vmId); + logger.error(String.format("vm (%s) is not in the stopped state. current state: %s", userVm, userVm.getState())); throw new InvalidParameterValueException("Vm " + userVm + " should be stopped to do SSH Key reset"); } @@ -1088,10 +1093,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir @Override public boolean stopVirtualMachine(long userId, long vmId) { boolean status = false; - if (logger.isDebugEnabled()) { - logger.debug("Stopping vm=" + vmId); - } UserVmVO vm = _vmDao.findById(vmId); + if (logger.isDebugEnabled()) { + logger.debug("Stopping vm {} with id {}", vm, vmId); + } if (vm == null || vm.getRemoved() != null) { if (logger.isDebugEnabled()) { logger.debug("VM is either removed or deleted."); @@ -1116,11 +1121,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir UserVmVO vm = _vmDao.findById(vmId); if (logger.isTraceEnabled()) { - logger.trace(String.format("reboot %s with enterSetup set to %s", vm.getInstanceName(), Boolean.toString(enterSetup))); + logger.trace("reboot {} with enterSetup set to {}", vm, Boolean.toString(enterSetup)); } if (vm == null || vm.getState() == State.Destroyed || vm.getState() == State.Expunging || vm.getRemoved() != null) { - logger.warn("Vm id=" + vmId + 
" doesn't exist"); + logger.warn("Vm {} with id={} doesn't exist or is not in correct state", vm, vmId); return null; } @@ -1132,7 +1137,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (vmOnHost == null || vmOnHost.getResourceState() != ResourceState.Enabled || vmOnHost.getStatus() != Status.Up ) { throw new CloudRuntimeException("Unable to force reboot the VM as the host: " + vm.getHostId() + " is not in the right state"); } - return forceRebootVirtualMachine(vmId, vm.getHostId(), enterSetup); + return forceRebootVirtualMachine(vm, vm.getHostId(), enterSetup); } DataCenterVO dc = _dcDao.findById(vm.getDataCenterId()); @@ -1150,7 +1155,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir //Safe to start the stopped router serially, this is consistent with the way how multiple networks are added to vm during deploy //and routers are started serially ,may revisit to make this process parallel for(DomainRouterVO routerToStart : routers) { - logger.warn("Trying to start router " + routerToStart.getInstanceName() + " as part of vm: " + vm.getInstanceName() + " reboot"); + logger.warn("Trying to start router {} as part of vm: {} reboot", routerToStart, vm); _virtualNetAppliance.startRouter(routerToStart.getId(),true); } } @@ -1160,7 +1165,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new CloudRuntimeException("Router start failed due to" + ex); } finally { if (logger.isInfoEnabled()) { - logger.info(String.format("Rebooting vm %s%s.", vm.getInstanceName(), enterSetup? " entering hardware setup menu" : " as is")); + logger.info("Rebooting vm {}{}.", vm, enterSetup ? 
" entering hardware setup menu" : " as is"); } Map params = null; if (enterSetup) { @@ -1174,24 +1179,22 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } return _vmDao.findById(vmId); } else { - logger.error("Vm id=" + vmId + " is not in Running state, failed to reboot"); + logger.error("Vm {} is not in Running state, failed to reboot", vm); return null; } } - private UserVm forceRebootVirtualMachine(long vmId, long hostId, boolean enterSetup) { + private UserVm forceRebootVirtualMachine(UserVmVO vm, long hostId, boolean enterSetup) { try { - if (stopVirtualMachine(vmId, false) != null) { + if (stopVirtualMachine(vm.getId(), false) != null) { Map params = new HashMap<>(); if (enterSetup) { params.put(VirtualMachineProfile.Param.BootIntoSetup, Boolean.TRUE); } - return startVirtualMachine(vmId, null, null, hostId, params, null, false).first(); + return startVirtualMachine(vm.getId(), null, null, hostId, params, null, false).first(); } - } catch (ResourceUnavailableException e) { - throw new CloudRuntimeException("Unable to reboot the VM: " + vmId, e); } catch (CloudException e) { - throw new CloudRuntimeException("Unable to reboot the VM: " + vmId, e); + throw new CloudRuntimeException(String.format("Unable to reboot the VM: %s", vm), e); } return null; } @@ -1394,12 +1397,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir long currentRootDiskOfferingGiB = currentRootDiskOffering.getDiskSize() / GiB_TO_BYTES; if (newNewOfferingRootSizeInBytes > currentRootDiskOffering.getDiskSize()) { resizeVolumeCmd = new ResizeVolumeCmd(rootVolume.getId(), newRootDiskOffering.getMinIops(), newRootDiskOffering.getMaxIops(), newRootDiskOffering.getId()); - logger.debug(String.format("Preparing command to resize VM Root disk from %d GB to %d GB; current offering: %s, new offering: %s.", currentRootDiskOfferingGiB, - newNewOfferingRootSizeInGiB, currentRootDiskOffering.getName(), newRootDiskOffering.getName())); 
+ logger.debug("Preparing command to resize VM Root disk from {} GB to {} GB; current offering: {}, new offering: {}.", + currentRootDiskOfferingGiB, newNewOfferingRootSizeInGiB, currentRootDiskOffering, newRootDiskOffering); } else if (newNewOfferingRootSizeInBytes > 0l && newNewOfferingRootSizeInBytes < currentRootDiskOffering.getDiskSize()) { throw new InvalidParameterValueException(String.format( - "Failed to resize Root volume. The new Service Offering [id: %d, name: %s] has a smaller disk size [%d GB] than the current disk [%d GB].", - newRootDiskOffering.getId(), newRootDiskOffering.getName(), newNewOfferingRootSizeInGiB, currentRootDiskOfferingGiB)); + "Failed to resize Root volume. The new Service Offering [%s] has a smaller disk size [%d GB] than the current disk [%d GB].", + newRootDiskOffering, newNewOfferingRootSizeInGiB, currentRootDiskOfferingGiB)); } return resizeVolumeCmd; } @@ -1441,7 +1444,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir checkIfNetExistsForVM(vmInstance, network); - macAddress = validateOrReplaceMacAddress(macAddress, network.getId()); + macAddress = validateOrReplaceMacAddress(macAddress, network); if(_nicDao.findByNetworkIdAndMacAddress(networkId, macAddress) != null) { throw new CloudRuntimeException("A NIC with this MAC address exists for network: " + network.getUuid()); @@ -1460,16 +1463,17 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // Verify that zone is not Basic DataCenterVO dc = _dcDao.findById(vmInstance.getDataCenterId()); if (dc.getNetworkType() == DataCenter.NetworkType.Basic) { - throw new CloudRuntimeException("Zone " + vmInstance.getDataCenterId() + ", has a NetworkType of Basic. Can't add a new NIC to a VM on a Basic Network"); + throw new CloudRuntimeException(String.format("Zone %s, has a NetworkType of Basic. 
Can't add a new NIC to a VM on a Basic Network", dc)); } //ensure network belongs in zone if (network.getDataCenterId() != vmInstance.getDataCenterId()) { - throw new CloudRuntimeException(vmInstance + " is in zone:" + vmInstance.getDataCenterId() + " but " + network + " is in zone:" + network.getDataCenterId()); + throw new CloudRuntimeException(String.format("%s is in zone: %s but %s is in zone: %s", + vmInstance, dc, network, dataCenterDao.findById(network.getDataCenterId()))); } if(_networkModel.getNicInNetwork(vmInstance.getId(),network.getId()) != null){ - logger.debug("VM " + vmInstance.getHostName() + " already in network " + network.getName() + " going to add another NIC"); + logger.debug("VM {} already in network {} going to add another NIC", vmInstance, network); } else { //* get all vms hostNames in the network List hostNames = _vmInstanceDao.listDistinctHostNames(network.getId()); @@ -1536,12 +1540,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir /** * If the given MAC address is invalid it replaces the given MAC with the next available MAC address */ - protected String validateOrReplaceMacAddress(String macAddress, long networkId) { + protected String validateOrReplaceMacAddress(String macAddress, NetworkVO network) { if (!NetUtils.isValidMac(macAddress)) { try { - macAddress = _networkModel.getNextAvailableMacAddressInNetwork(networkId); + macAddress = _networkModel.getNextAvailableMacAddressInNetwork(network.getId()); } catch (InsufficientAddressCapacityException e) { - throw new CloudRuntimeException(String.format("A MAC address cannot be generated for this NIC in the network [id=%s] ", networkId)); + throw new CloudRuntimeException(String.format("A MAC address cannot be generated for this NIC in the network [%s] ", network)); } } return macAddress; @@ -1590,7 +1594,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // Verify that zone is not Basic DataCenterVO dc = 
_dcDao.findById(vmInstance.getDataCenterId()); if (dc.getNetworkType() == DataCenter.NetworkType.Basic) { - throw new InvalidParameterValueException("Zone " + vmInstance.getDataCenterId() + ", has a NetworkType of Basic. Can't remove a NIC from a VM on a Basic Network"); + throw new InvalidParameterValueException(String.format("Zone %s, has a NetworkType of Basic. Can't remove a NIC from a VM on a Basic Network", dc)); } // check to see if nic is attached to VM @@ -1658,7 +1662,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // Verify that zone is not Basic DataCenterVO dc = _dcDao.findById(vmInstance.getDataCenterId()); if (dc.getNetworkType() == DataCenter.NetworkType.Basic) { - throw new CloudRuntimeException("Zone " + vmInstance.getDataCenterId() + ", has a NetworkType of Basic. Can't change default NIC on a Basic Network"); + throw new CloudRuntimeException(String.format("Zone %s, has a NetworkType of Basic. Can't change default NIC on a Basic Network", dc)); } // no need to check permissions for network, we'll enumerate the ones they already have access to @@ -1750,8 +1754,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir return _vmDao.findById(vmInstance.getId()); } - throw new CloudRuntimeException("something strange happened, new default network(" + newdefault.getId() + ") is not null, and is not equal to the network(" - + nic.getNetworkId() + ") of the chosen nic"); + throw new CloudRuntimeException(String.format("something strange happened, new default network(%s) is not null, and is not equal to the network(%d) of the chosen nic", newdefault, nic.getNetworkId())); } @Override @@ -1810,10 +1813,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir try { ipaddr = _ipAddrMgr.allocateGuestIP(network, ipaddr); } catch (InsufficientAddressCapacityException e) { - throw new InvalidParameterValueException("Allocating ip to guest nic " + nicVO.getUuid() + " 
failed, for insufficient address capacity"); + throw new InvalidParameterValueException(String.format("Allocating ip to guest nic %s failed, for insufficient address capacity", nicVO)); } if (ipaddr == null) { - throw new InvalidParameterValueException("Allocating ip to guest nic " + nicVO.getUuid() + " failed, please choose another ip"); + throw new InvalidParameterValueException(String.format("Allocating ip to guest nic %s failed, please choose another ip", nicVO)); } if (nicVO.getIPv4Address() != null) { @@ -1855,7 +1858,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir }); } } catch (InsufficientAddressCapacityException e) { - logger.error("Allocating ip to guest nic " + nicVO.getUuid() + " failed, for insufficient address capacity"); + logger.error("Allocating ip to guest nic {} failed, for insufficient address capacity", nicVO); return null; } } else { @@ -2095,7 +2098,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // #1 Check existing host has capacity & and the correct tags if (!excludes.shouldAvoid(ApiDBUtils.findHostById(vmInstance.getHostId()))) { existingHostHasCapacity = _capacityMgr.checkIfHostHasCpuCapability(vmInstance.getHostId(), newCpu, newSpeed) - && _capacityMgr.checkIfHostHasCapacity(vmInstance.getHostId(), cpuDiff, ByteScaleUtils.mebibytesToBytes(memoryDiff), false, + && _capacityMgr.checkIfHostHasCapacity(host, cpuDiff, ByteScaleUtils.mebibytesToBytes(memoryDiff), false, _capacityMgr.getClusterOverProvisioningFactor(host.getClusterId(), Capacity.CAPACITY_TYPE_CPU), _capacityMgr.getClusterOverProvisioningFactor(host.getClusterId(), Capacity.CAPACITY_TYPE_MEMORY), false) && checkEnforceStrictHostTagCheck(vmInstance, host); @@ -2165,7 +2168,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (currentRootDiskOffering.getId() == newDiskOffering.getId() && (!newDiskOffering.isCustomized() || (newDiskOffering.isCustomized() && 
Objects.equals(rootVolumeOfVm.getSize(), rootDiskSizeBytes)))) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Volume %s is already having disk offering %s", rootVolumeOfVm, newDiskOffering.getUuid())); + logger.debug("Volume {} is already having disk offering {}", rootVolumeOfVm, newDiskOffering); } continue; } @@ -2284,20 +2287,20 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (vm.getRemoved() != null) { if (logger.isDebugEnabled()) { - logger.debug("Unable to find vm or vm is removed: " + vmId); + logger.debug("Unable to find vm. vm is removed: {}", vm); } - throw new InvalidParameterValueException("Unable to find vm by id " + vmId); + throw new InvalidParameterValueException("Unable to find vm by id " + vm.getUuid()); } if (vm.getState() != State.Destroyed) { if (logger.isDebugEnabled()) { - logger.debug("vm is not in the right state: " + vmId); + logger.debug("vm {} is not in the Destroyed state. current state: {}", vm, vm.getState()); } - throw new InvalidParameterValueException("Vm with id " + vmId + " is not in the right state"); + throw new InvalidParameterValueException("Vm with id " + vm.getUuid() + " is not in the right state"); } if (logger.isDebugEnabled()) { - logger.debug("Recovering vm " + vmId); + logger.debug("Recovering vm {}", vm); } Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { @@ -2324,11 +2327,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir try { if (!_itMgr.stateTransitTo(vm, VirtualMachine.Event.RecoveryRequested, null)) { - logger.debug("Unable to recover the vm because it is not in the correct state: " + vmId); - throw new InvalidParameterValueException("Unable to recover the vm because it is not in the correct state: " + vmId); + logger.debug("Unable to recover the vm {} because it is not in the correct state. 
current state: {}", vm, vm.getState()); + throw new InvalidParameterValueException(String.format("Unable to recover the vm %s because it is not in the correct state. current state: %s", vm, vm.getState())); } } catch (NoTransitionException e) { - throw new InvalidParameterValueException("Unable to recover the vm because it is not in the correct state: " + vmId); + throw new InvalidParameterValueException(String.format("Unable to recover the vm %s because it is not in the correct state. current state: %s", vm, vm.getState())); } // Recover the VM's disks @@ -2498,7 +2501,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // Cleanup vm resources - all the PF/LB/StaticNat rules // associated with vm logger.debug("Starting cleaning up vm " + vm + " resources..."); - if (cleanupVmResources(vm.getId())) { + if (cleanupVmResources(vm)) { logger.debug("Successfully cleaned up vm " + vm + " resources as a part of expunge process"); } else { logger.warn("Failed to cleanup resources as a part of vm " + vm + " expunge"); @@ -2546,20 +2549,21 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } - private boolean cleanupVmResources(long vmId) { + private boolean cleanupVmResources(UserVmVO vm) { + long vmId = vm.getId(); boolean success = true; // Remove vm from security groups - _securityGroupMgr.removeInstanceFromGroups(vmId); + _securityGroupMgr.removeInstanceFromGroups(vm); // Remove vm from instance group removeInstanceFromInstanceGroup(vmId); // cleanup firewall rules if (_firewallMgr.revokeFirewallRulesForVm(vmId)) { - logger.debug("Firewall rules are removed successfully as a part of vm id=" + vmId + " expunge"); + logger.debug("Firewall rules are removed successfully as a part of vm {} expunge", vm); } else { success = false; - logger.warn("Fail to remove firewall rules as a part of vm id=" + vmId + " expunge"); + logger.warn("Fail to remove firewall rules as a part of vm {} expunge", vm); } // 
cleanup port forwarding rules @@ -2567,19 +2571,19 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir NsxProviderVO nsx = nsxProviderDao.findByZoneId(vmInstanceVO.getDataCenterId()); if (Objects.isNull(nsx) || Objects.isNull(kubernetesServiceHelpers.get(0).findByVmId(vmId))) { if (_rulesMgr.revokePortForwardingRulesForVm(vmId)) { - logger.debug("Port forwarding rules are removed successfully as a part of vm id=" + vmId + " expunge"); + logger.debug("Port forwarding rules are removed successfully as a part of vm {} expunge", vm); } else { success = false; - logger.warn("Fail to remove port forwarding rules as a part of vm id=" + vmId + " expunge"); + logger.warn("Fail to remove port forwarding rules as a part of vm {} expunge", vm); } } // cleanup load balancer rules if (_lbMgr.removeVmFromLoadBalancers(vmId)) { - logger.debug("Removed vm id=" + vmId + " from all load balancers as a part of expunge process"); + logger.debug("Removed vm {} from all load balancers as a part of expunge process", vm); } else { success = false; - logger.warn("Fail to remove vm id=" + vmId + " from load balancers as a part of expunge process"); + logger.warn("Fail to remove vm {} from load balancers as a part of expunge process", vm); } // If vm is assigned to static nat, disable static nat for the ip @@ -2589,14 +2593,14 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir for (IPAddressVO ip : ips) { try { if (_rulesMgr.disableStaticNat(ip.getId(), _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM), User.UID_SYSTEM, true)) { - logger.debug("Disabled 1-1 nat for ip address " + ip + " as a part of vm id=" + vmId + " expunge"); + logger.debug("Disabled 1-1 nat for ip address {} as a part of vm {} expunge", ip, vm); } else { - logger.warn("Failed to disable static nat for ip address " + ip + " as a part of vm id=" + vmId + " expunge"); + logger.warn("Failed to disable static nat for ip address {} as a part of vm {} expunge", ip, 
vm); success = false; } } catch (ResourceUnavailableException e) { success = false; - logger.warn("Failed to disable static nat for ip address " + ip + " as a part of vm id=" + vmId + " expunge because resource is unavailable", e); + logger.warn("Failed to disable static nat for ip address {} as a part of vm {} expunge because resource is unavailable", ip, vm, e); } } @@ -2617,7 +2621,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (vm != null) { if (vm.getState().equals(State.Stopped)) { - logger.debug("Destroying vm " + vm + " as it failed to create on Host with Id:" + hostId); + HostVO host = _hostDao.findById(hostId); + logger.debug("Destroying vm {} as it failed to create on Host: {} with id {}", vm, host, hostId); try { _itMgr.stateTransitTo(vm, VirtualMachine.Event.OperationFailedToError, null); } catch (NoTransitionException e1) { @@ -2631,7 +2636,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir volumeMgr.destroyVolume(volume); } } - String msg = "Failed to deploy Vm with Id: " + vmId + ", on Host with Id: " + hostId; + String msg = String.format("Failed to deploy Vm %s, on Host %s with Id: %d", vm, host, hostId); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); // Get serviceOffering and template for Virtual Machine @@ -2681,7 +2686,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir VirtualMachine vm = vmProfile.getVirtualMachine(); boolean isWindows = _guestOSCategoryDao.findById(_guestOSDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows"); - _vmIpFetchThreadExecutor.execute(new VmIpAddrFetchThread(vmId, nicId, vmInstance.getInstanceName(), + _vmIpFetchThreadExecutor.execute(new VmIpAddrFetchThread(vmId, vmInstance.getUuid(), nicId, vmInstance.getInstanceName(), isWindows, vm.getHostId(), network.getCidr())); } @@ -3029,8 +3034,8 @@ public 
class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (vm.getState() == State.Error || vm.getState() == State.Expunging) { - logger.error("vm is not in the right state: " + id); - throw new InvalidParameterValueException("Vm with id " + id + " is not in the right state"); + logger.error("vm {} is not in the correct state. current state: {}", vm, vm.getState()); + throw new InvalidParameterValueException(String.format("Vm %s is not in the right state", vm)); } if (displayName == null) { @@ -3093,7 +3098,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new InvalidParameterValueException("Dynamic Scaling cannot be enabled for the VM since its service offering does not have dynamic scaling enabled"); } if (!UserVmManager.EnableDynamicallyScaleVm.valueIn(vm.getDataCenterId())) { - logger.debug(String.format("Dynamic Scaling cannot be enabled for the VM %s since the global setting enable.dynamic.scale.vm is set to false", vm.getUuid())); + logger.debug("Dynamic Scaling cannot be enabled for the VM {} since the global setting enable.dynamic.scale.vm is set to false", vm); throw new InvalidParameterValueException("Dynamic Scaling cannot be enabled for the VM since corresponding global setting is set to false"); } } @@ -3127,9 +3132,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (securityGroupIdList != null && _networkModel.isSecurityGroupSupportedInNetwork(defaultNetwork) && _networkModel.canAddDefaultSecurityGroup()) { if (vm.getState() == State.Stopped) { // Remove instance from security groups - _securityGroupMgr.removeInstanceFromGroups(id); + _securityGroupMgr.removeInstanceFromGroups(vm); // Add instance in provided groups - _securityGroupMgr.addInstanceToGroups(id, securityGroupIdList); + _securityGroupMgr.addInstanceToGroups(vm, securityGroupIdList); } else { throw new InvalidParameterValueException("Virtual machine must be stopped prior to update security 
groups "); } @@ -3185,7 +3190,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir protected void updateUserData(UserVm vm) throws ResourceUnavailableException, InsufficientCapacityException { boolean result = updateUserDataInternal(vm); if (result) { - logger.debug(String.format("User data successfully updated for vm id: %s", vm.getId())); + logger.debug("User data successfully updated for vm id: {}", vm); } else { throw new CloudRuntimeException("Failed to reset userdata for the virtual machine "); } @@ -3200,7 +3205,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir List routers = _routerDao.findByNetwork(nic.getNetworkId()); for (DomainRouterVO router : routers) { if (router.getState() != State.Running) { - logger.warn(String.format("Unable to update DNS for VM %s, as virtual router: %s is not in the right state: %s ", vm, router.getName(), router.getState())); + logger.warn("Unable to update DNS for VM {}, as virtual router: {} is not in the right state: {} ", vm, router, router.getState()); continue; } Commands commands = new Commands(Command.OnError.Stop); @@ -3226,7 +3231,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir List nics = _nicDao.listByVmId(vm.getId()); if (nics == null || nics.isEmpty()) { - logger.error("unable to find any nics for vm " + vm.getUuid()); + logger.error("unable to find any nics for vm {}", vm); return false; } @@ -3255,7 +3260,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir return true; } } else { - logger.debug("Not applying userdata for nic id=" + nic.getId() + " in vm id=" + vmProfile.getId() + " because it is not supported in network id=" + network.getId()); + logger.debug("Not applying userdata for nic {} in vm {} because it is not supported in network {}", nic, vmProfile, network); } return false; } @@ -3314,7 +3319,7 @@ public class UserVmManagerImpl extends ManagerBase implements 
UserVmManager, Vir _accountMgr.checkAccess(caller, null, true, vmInstance); - checkIfHostOfVMIsInPrepareForMaintenanceState(vmInstance.getHostId(), vmId, "Reboot"); + checkIfHostOfVMIsInPrepareForMaintenanceState(vmInstance, "Reboot"); // If the VM is Volatile in nature, on reboot discard the VM's root disk and create a new root disk for it: by calling restoreVM long serviceOfferingId = vmInstance.getServiceOfferingId(); @@ -3339,8 +3344,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir for (NicVO nic : nics) { Network network = _networkModel.getNetwork(nic.getNetworkId()); if (_networkModel.isSharedNetworkWithoutServices(network.getId())) { - logger.debug("Adding vm " +vmId +" nic id "+ nic.getId() +" into vmIdCountMap as part of vm " + - "reboot for vm ip fetch "); + logger.debug("Adding vm {} nic {} into vmIdCountMap as part of vm reboot for vm ip fetch ", userVm, nic); vmIdCountMap.put(nic.getId(), new VmAndCountDetails(nic.getInstanceId(), VmIpFetchTrialMax.value())); } } @@ -3402,7 +3406,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (Arrays.asList(State.Destroyed, State.Expunging).contains(vm.getState()) && !expunge) { - logger.debug("Vm id=" + vmId + " is already destroyed"); + logger.debug("Vm {} is already destroyed", vm); return vm; } @@ -3419,11 +3423,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir checkPluginsIfVmCanBeDestroyed(vm); // check if there are active volume snapshots tasks - logger.debug("Checking if there are any ongoing snapshots on the ROOT volumes associated with VM with ID " + vmId); - if (checkStatusOfVolumeSnapshots(vmId, Volume.Type.ROOT)) { + logger.debug("Checking if there are any ongoing snapshots on the ROOT volumes associated with VM {}", vm); + if (checkStatusOfVolumeSnapshots(vm, Volume.Type.ROOT)) { throw new CloudRuntimeException("There is/are unbacked up snapshot(s) on ROOT volume, vm destroy is not 
permitted, please try again later."); } - logger.debug("Found no ongoing snapshots on volume of type ROOT, for the vm with id " + vmId); + logger.debug("Found no ongoing snapshots on volume of type ROOT, for the vm {}", vm); List volumesToBeDeleted = getVolumesFromIds(cmd); @@ -3437,7 +3441,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // Detach all data disks from VM List dataVols = _volsDao.findByInstanceAndType(vmId, Volume.Type.DATADISK); - detachVolumesFromVm(dataVols); + detachVolumesFromVm(vm, dataVols); UserVm destroyedVm = destroyVm(vmId, expunge); if (expunge && !expunge(vm)) { @@ -3453,7 +3457,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (rootVolume != null) { _volService.destroyVolume(rootVolume.getId()); } else { - logger.warn(String.format("Tried to destroy ROOT volume for VM [%s], but couldn't retrieve it.", vm.getUuid())); + logger.warn("Tried to destroy ROOT volume for VM [{}], but couldn't retrieve it.", vm); } } @@ -3491,7 +3495,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir boolean isNameInUse = _vmGroupDao.isNameInUse(accountId, groupName); if (isNameInUse) { - throw new InvalidParameterValueException("Unable to create vm group, a group with name " + groupName + " already exists for account " + accountId); + throw new InvalidParameterValueException(String.format("Unable to create vm group, a group with name %s already exists for account %s", groupName, owner)); } return createVmGroup(groupName, accountId); @@ -3573,7 +3577,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (group != null) { UserVm userVm = _vmDao.acquireInLockTable(userVmId); if (userVm == null) { - logger.warn("Failed to acquire lock on user vm id=" + userVmId); + logger.warn("Failed to acquire lock on user vm {} with id {}", vm, userVmId); } try { final InstanceGroupVO groupFinal = group; @@ -3584,8 +3588,8 @@ public 
class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // it. InstanceGroupVO ngrpLock = _vmGroupDao.lockRow(groupFinal.getId(), false); if (ngrpLock == null) { - logger.warn("Failed to acquire lock on vm group id=" + groupFinal.getId() + " name=" + groupFinal.getName()); - throw new CloudRuntimeException("Failed to acquire lock on vm group id=" + groupFinal.getId() + " name=" + groupFinal.getName()); + logger.warn("Failed to acquire lock on vm group {}", groupFinal); + throw new CloudRuntimeException(String.format("Failed to acquire lock on vm group %s", groupFinal)); } // Currently don't allow to assign a vm to more than one group @@ -3766,7 +3770,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (!_networkModel.isSecurityGroupSupportedInNetwork(network) && (ntwkOffering.getGuestType() != GuestType.L2)) { - throw new InvalidParameterValueException("Network is not security group enabled or not L2 network: " + network.getId()); + throw new InvalidParameterValueException(String.format("Network is not security group enabled or not L2 network: %s", network)); } _accountMgr.checkAccess(owner, AccessType.UseEntry, false, network); @@ -3881,7 +3885,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // don't allow to use system networks NetworkOffering networkOffering = _entityMgr.findById(NetworkOffering.class, network.getNetworkOfferingId()); if (networkOffering.isSystemOnly()) { - throw new InvalidParameterValueException("Network id=" + networkId + " is system only and can't be used for vm deployment"); + throw new InvalidParameterValueException(String.format("Network id=%s is system only and can't be used for vm deployment", network.getUuid())); } networkList.add(network); } @@ -3898,9 +3902,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir logger.info("Loading UserVm " + vmId + " from DB"); UserVm userVm = getUserVm(vmId); if (userVm == 
null) { - logger.info("Loaded UserVm " + vmId + " (" + userVm.getUuid() + ") from DB"); + logger.warn("UserVm with {} does not exist in DB", vmId); } else { - logger.warn("UserVm " + vmId + " does not exist in DB"); + logger.info("Loaded UserVm {} from DB", userVm); } return userVm; } @@ -3928,7 +3932,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // don't allow to use system networks NetworkOffering networkOffering = _entityMgr.findById(NetworkOffering.class, network.getNetworkOfferingId()); if (networkOffering.isSystemOnly()) { - throw new InvalidParameterValueException("Network id=" + networkId + " is system only and can't be used for vm deployment"); + throw new InvalidParameterValueException(String.format("Network id=%s is system only and can't be used for vm deployment", network.getUuid())); } return network; } @@ -3964,7 +3968,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir defaultNetwork = _networkDao.findById(virtualNetworks.get(0).getId()); } } else { - throw new InvalidParameterValueException("Required network offering id=" + requiredOfferings.get(0).getId() + " is not in " + NetworkOffering.State.Enabled); + throw new InvalidParameterValueException(String.format("Required network offering %s is not in %s", requiredOfferings.get(0), NetworkOffering.State.Enabled)); } return defaultNetwork; @@ -3980,7 +3984,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new InvalidParameterValueException("Unable to find physical network with id: " + physicalNetworkId + " and tag: " + requiredOfferings.get(0).getTags()); } - logger.debug("Creating network for account " + owner + " from the network offering id=" + requiredOfferings.get(0).getId() + " as a part of deployVM process"); + logger.debug("Creating network for account {} from the network offering {} as a part of deployVM process", owner, requiredOfferings.get(0)); Network newNetwork = 
_networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), owner.getAccountName() + "-network", owner.getAccountName() + "-network", null, null, null, false, null, owner, null, physicalNetwork, zone.getId(), ACLType.Account, null, null, null, null, true, null, null, null, null, null, null, null, null, null, null, null); @@ -4054,8 +4058,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getId())) { throw new PermissionDeniedException( - "Cannot perform this operation, Zone is currently disabled: " - + zone.getId()); + String.format("Cannot perform this operation, Zone is currently disabled: %s", zone)); } // check if zone is dedicated @@ -4116,7 +4119,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } UserVm vm = getCheckedUserVmResource(zone, hostName, displayName, owner, diskOfferingId, diskSize, networkList, securityGroupIdList, group, httpmethod, userData, userDataId, userDataDetails, sshKeyPairs, caller, requestedIps, defaultIps, isDisplayVm, keyboard, affinityGroupIdList, customParameters, customId, dhcpOptionMap, datadiskTemplateToDiskOfferringMap, userVmOVFPropertiesMap, dynamicScalingEnabled, vmType, template, hypervisorType, accountId, offering, isIso, rootDiskOfferingId, volumesSize, additionalDiskSize); - _securityGroupMgr.addInstanceToGroups(vm.getId(), securityGroupIdList); + _securityGroupMgr.addInstanceToGroups(vm, securityGroupIdList); if (affinityGroupIdList != null && !affinityGroupIdList.isEmpty()) { _affinityGroupVMMapDao.updateMap(vm.getId(), affinityGroupIdList); @@ -4202,20 +4205,16 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } long dataDiskTemplateId = datadiskTemplateToDiskOffering.getKey(); if (!dataDiskTemplate.getParentTemplateId().equals(template.getId())) { - throw new InvalidParameterValueException("Invalid Datadisk 
template. Specified Datadisk template" + dataDiskTemplateId - + " doesn't belong to template " + template.getId()); + throw new InvalidParameterValueException(String.format("Invalid Datadisk template. Specified Datadisk template %s doesn't belong to template %s", dataDiskTemplate, template)); } if (dataDiskOffering == null) { - throw new InvalidParameterValueException("Invalid disk offering id " + datadiskTemplateToDiskOffering.getValue().getId() + - " specified for datadisk template " + dataDiskTemplateId); + throw new InvalidParameterValueException(String.format("Invalid disk offering %s specified for datadisk template %s", datadiskTemplateToDiskOffering.getValue(), dataDiskTemplate)); } if (dataDiskOffering.isCustomized()) { - throw new InvalidParameterValueException("Invalid disk offering id " + dataDiskOffering.getId() + " specified for datadisk template " + - dataDiskTemplateId + ". Custom Disk offerings are not supported for Datadisk templates"); + throw new InvalidParameterValueException(String.format("Invalid disk offering %s specified for datadisk template %s. Custom Disk offerings are not supported for Datadisk templates", dataDiskOffering, dataDiskTemplate)); } if (dataDiskOffering.getDiskSize() < dataDiskTemplate.getSize()) { - throw new InvalidParameterValueException("Invalid disk offering id " + dataDiskOffering.getId() + " specified for datadisk template " + - dataDiskTemplateId + ". Disk offering size should be greater than or equal to the template size"); + throw new InvalidParameterValueException(String.format("Invalid disk offering %s specified for datadisk template %s. 
Disk offering size should be greater than or equal to the template size", dataDiskOffering, dataDiskTemplate)); } _templateDao.loadDetails(dataDiskTemplate); resourceLimitService.checkVolumeResourceLimit(owner, true, dataDiskOffering.getDiskSize(), dataDiskOffering); @@ -4267,15 +4266,15 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (template.getTemplateType().equals(TemplateType.SYSTEM) && !CKS_NODE.equals(vmType) && !SHAREDFSVM.equals(vmType)) { - throw new InvalidParameterValueException("Unable to use system template " + template.getId() + " to deploy a user vm"); + throw new InvalidParameterValueException(String.format("Unable to use system template %s to deploy a user vm", template)); } List listZoneTemplate = _templateZoneDao.listByZoneTemplate(zone.getId(), template.getId()); if (listZoneTemplate == null || listZoneTemplate.isEmpty()) { - throw new InvalidParameterValueException("The template " + template.getId() + " is not available for use"); + throw new InvalidParameterValueException(String.format("The template %s is not available for use", template)); } if (isIso && !template.isBootable()) { - throw new InvalidParameterValueException("Installing from ISO requires an ISO that is bootable: " + template.getId()); + throw new InvalidParameterValueException(String.format("Installing from ISO requires an ISO that is bootable: %s", template)); } // Check templates permissions @@ -4306,8 +4305,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir for (NetworkVO network : networkList) { if ((network.getDataCenterId() != zone.getId())) { if (!network.isStrechedL2Network()) { - throw new InvalidParameterValueException("Network id=" + network.getId() + - " doesn't belong to zone " + zone.getId()); + throw new InvalidParameterValueException(String.format("Network %s doesn't belong to zone %s", network, zone)); } NetworkOffering ntwkOffering = 
_networkOfferingDao.findById(network.getNetworkOfferingId()); @@ -4356,7 +4354,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (template.isEnablePassword()) { - throw new InvalidParameterValueException(String.format("Unable to deploy VM as template %s is password enabled, but there is no support for %s service in the default network %s/%s", template.getId(), Service.UserData.getName(), network.getName(), network.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to deploy VM as template %s is password enabled, but there is no support for %s service in the default network %s/%s", template, Service.UserData.getName(), network.getName(), network.getUuid())); } } } @@ -4933,17 +4931,18 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir List vmNames = new ArrayList(); vmNames.add(userVm.getInstanceName()); final HostVO host = _hostDao.findById(hostId); + Account account = _accountMgr.getAccount(userVm.getAccountId()); GetVmNetworkStatsAnswer networkStatsAnswer = null; try { networkStatsAnswer = (GetVmNetworkStatsAnswer) _agentMgr.easySend(hostId, new GetVmNetworkStatsCommand(vmNames, host.getGuid(), host.getName())); } catch (Exception e) { - logger.warn("Error while collecting network stats for vm: " + userVm.getHostName() + " from host: " + host.getName(), e); + logger.warn("Error while collecting network stats for vm: {} from host: {}", userVm, host, e); return; } if (networkStatsAnswer != null) { if (!networkStatsAnswer.getResult()) { - logger.warn("Error while collecting network stats vm: " + userVm.getHostName() + " from host: " + host.getName() + "; details: " + networkStatsAnswer.getDetails()); + logger.warn("Error while collecting network stats vm: {} from host: {}; details: {}", userVm, host, networkStatsAnswer.getDetails()); return; } try { @@ -4982,7 +4981,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if 
(vmNetworkStat_lock == null) { - logger.warn("unable to find vm network stats from host for account: " + userVm.getAccountId() + " with vmId: " + userVm.getId()+ " and nicId:" + nic.getId()); + logger.warn("unable to find vm network stats from host for account: {} with vm: {} and nic: {}", account, userVm, nic); continue; } @@ -4990,16 +4989,15 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir && ((previousvmNetworkStats.getCurrentBytesSent() != vmNetworkStat_lock.getCurrentBytesSent()) || (previousvmNetworkStats.getCurrentBytesReceived() != vmNetworkStat_lock.getCurrentBytesReceived()))) { logger.debug("vm network stats changed from the time GetNmNetworkStatsCommand was sent. " + - "Ignoring current answer. Host: " + host.getName() + " . VM: " + vmNetworkStat.getVmName() + + "Ignoring current answer. Host: " + host + " . VM: " + vmNetworkStat.getVmName() + " Sent(Bytes): " + toHumanReadableSize(vmNetworkStat.getBytesSent()) + " Received(Bytes): " + toHumanReadableSize(vmNetworkStat.getBytesReceived())); continue; } if (vmNetworkStat_lock.getCurrentBytesSent() > vmNetworkStat.getBytesSent()) { if (logger.isDebugEnabled()) { - logger.debug("Sent # of bytes that's less than the last one. " + - "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmNetworkStat.getVmName() + - " Reported: " + toHumanReadableSize(vmNetworkStat.getBytesSent()) + " Stored: " + toHumanReadableSize(vmNetworkStat_lock.getCurrentBytesSent())); + logger.debug("Sent # of bytes that's less than the last one. Assuming something went wrong and persisting it. Host: {} . 
VM: {} Reported: {} Stored: {}", + host, vmNetworkStat.getVmName(), toHumanReadableSize(vmNetworkStat.getBytesSent()), toHumanReadableSize(vmNetworkStat_lock.getCurrentBytesSent())); } vmNetworkStat_lock.setNetBytesSent(vmNetworkStat_lock.getNetBytesSent() + vmNetworkStat_lock.getCurrentBytesSent()); } @@ -5007,9 +5005,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (vmNetworkStat_lock.getCurrentBytesReceived() > vmNetworkStat.getBytesReceived()) { if (logger.isDebugEnabled()) { - logger.debug("Received # of bytes that's less than the last one. " + - "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmNetworkStat.getVmName() + - " Reported: " + toHumanReadableSize(vmNetworkStat.getBytesReceived()) + " Stored: " + toHumanReadableSize(vmNetworkStat_lock.getCurrentBytesReceived())); + logger.debug("Received # of bytes that's less than the last one. Assuming something went wrong and persisting it. Host: {} . VM: {} Reported: {} Stored: {}", + host, vmNetworkStat.getVmName(), toHumanReadableSize(vmNetworkStat.getBytesReceived()), toHumanReadableSize(vmNetworkStat_lock.getCurrentBytesReceived())); } vmNetworkStat_lock.setNetBytesReceived(vmNetworkStat_lock.getNetBytesReceived() + vmNetworkStat_lock.getCurrentBytesReceived()); } @@ -5026,7 +5023,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } }); } catch (Exception e) { - logger.warn("Unable to update vm network statistics for vm: " + userVm.getId() + " from host: " + hostId, e); + logger.warn("Unable to update vm network statistics for vm: {} from host: {}", userVm, host, e); } } } @@ -5094,7 +5091,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } catch (Exception e) { - logger.fatal("Unable to resize the data disk for vm " + vm.getDisplayName() + " due to " + e.getMessage(), e); + logger.fatal("Unable to resize the data disk for vm {} due to {}", vm, 
e.getMessage(), e); } } finally { @@ -5482,8 +5479,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir UserVO callerUser = _userDao.findById(CallContext.current().getCallingUserId()); // if account is removed, return error - if (callerAccount != null && callerAccount.getRemoved() != null) { - throw new InvalidParameterValueException("The account " + callerAccount.getId() + " is removed"); + if (callerAccount == null || callerAccount.getRemoved() != null) { + throw new InvalidParameterValueException(String.format("The account %s is removed", callerAccount)); } UserVmVO vm = _vmDao.findById(vmId); @@ -5505,7 +5502,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (owner.getState() == Account.State.DISABLED) { - throw new PermissionDeniedException("The owner of " + vm + " is disabled: " + vm.getAccountId()); + throw new PermissionDeniedException(String.format("The owner of %s is disabled: %s", vm, owner)); } VMTemplateVO template = _templateDao.findByIdIncludingRemoved(vm.getTemplateId()); if (VirtualMachineManager.ResourceCountRunningVMsonly.value()) { @@ -5525,7 +5522,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (defaultSecurityGroup != null) { List groupList = new ArrayList(); groupList.add(defaultSecurityGroup.getId()); - _securityGroupMgr.addInstanceToGroups(vmId, groupList); + _securityGroupMgr.addInstanceToGroups(vm, groupList); } } // Choose deployment planner @@ -5547,9 +5544,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (!cpuCapabilityAndCapacity.first() || !cpuCapabilityAndCapacity.second()) { String errorMsg; if (!cpuCapabilityAndCapacity.first()) { - errorMsg = String.format("Cannot deploy the VM to specified host %d, requested CPU and speed is more than the host capability", hostId); + errorMsg = String.format("Cannot deploy the VM to specified host %s, requested CPU and speed is more than the 
host capability", destinationHost); } else { - errorMsg = String.format("Cannot deploy the VM to specified host %d, host does not have enough free CPU or RAM, please check the logs", hostId); + errorMsg = String.format("Cannot deploy the VM to specified host %s, host does not have enough free CPU or RAM, please check the logs", destinationHost); } logger.info(errorMsg); if (!AllowDeployVmIfGivenHostFails.value()) { @@ -5744,7 +5741,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (vm.getState() == State.Destroyed || vm.getState() == State.Expunging) { - logger.trace("Vm id=" + vmId + " is already destroyed"); + logger.trace("Vm {} is already destroyed", vm); return vm; } @@ -5813,17 +5810,18 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir List vmNames = new ArrayList(); vmNames.add(userVm.getInstanceName()); final HostVO host = _hostDao.findById(hostId); + Account account = _accountMgr.getAccount(userVm.getAccountId()); GetVmDiskStatsAnswer diskStatsAnswer = null; try { diskStatsAnswer = (GetVmDiskStatsAnswer)_agentMgr.easySend(hostId, new GetVmDiskStatsCommand(vmNames, host.getGuid(), host.getName())); } catch (Exception e) { - logger.warn("Error while collecting disk stats for vm: " + userVm.getInstanceName() + " from host: " + host.getName(), e); + logger.warn("Error while collecting disk stats for vm: {} from host: {}", userVm, host, e); return; } if (diskStatsAnswer != null) { if (!diskStatsAnswer.getResult()) { - logger.warn("Error while collecting disk stats vm: " + userVm.getInstanceName() + " from host: " + host.getName() + "; details: " + diskStatsAnswer.getDetails()); + logger.warn("Error while collecting disk stats vm: {} from host: {}; details: {}", userVm, host, diskStatsAnswer.getDetails()); return; } try { @@ -5857,8 +5855,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (vmDiskStat_lock == null) { - logger.warn("unable to find vm 
disk stats from host for account: " + userVm.getAccountId() + " with vmId: " + userVm.getId() + " and volumeId:" - + volume.getId()); + logger.warn("unable to find vm disk stats from host for account: {} with vm: {} and volume: {}", account, userVm, volume); continue; } @@ -5867,41 +5864,53 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir .getCurrentIOWrite()) || (previousVmDiskStats.getCurrentBytesRead() != vmDiskStat_lock.getCurrentBytesRead()) || (previousVmDiskStats .getCurrentBytesWrite() != vmDiskStat_lock.getCurrentBytesWrite())))) { - logger.debug("vm disk stats changed from the time GetVmDiskStatsCommand was sent. " + "Ignoring current answer. Host: " + host.getName() - + " . VM: " + vmDiskStat.getVmName() + " IO Read: " + vmDiskStat.getIORead() + " IO Write: " + vmDiskStat.getIOWrite() + " Bytes Read: " - + vmDiskStat.getBytesRead() + " Bytes Write: " + vmDiskStat.getBytesWrite()); + logger.debug("vm disk stats changed from the time" + + " GetVmDiskStatsCommand was sent. Ignoring current " + + "answer. Host: {} . VM: {} IO Read: {} IO Write: {} " + + "Bytes Read: {} Bytes Write: {}", + host, vmDiskStat, vmDiskStat.getIORead(), vmDiskStat.getIOWrite(), + vmDiskStat.getBytesRead(), vmDiskStat.getBytesWrite()); continue; } if (vmDiskStat_lock.getCurrentIORead() > vmDiskStat.getIORead()) { if (logger.isDebugEnabled()) { - logger.debug("Read # of IO that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() - + " . VM: " + vmDiskStat.getVmName() + " Reported: " + vmDiskStat.getIORead() + " Stored: " + vmDiskStat_lock.getCurrentIORead()); + logger.debug("Read # of IO that's less than " + + "the last one. Assuming something went wrong and " + + "persisting it. Host: {} . 
VM: {} Reported: {} Stored: {}", + host, vmDiskStat, vmDiskStat.getIORead(), vmDiskStat_lock.getCurrentIORead()); } vmDiskStat_lock.setNetIORead(vmDiskStat_lock.getNetIORead() + vmDiskStat_lock.getCurrentIORead()); } vmDiskStat_lock.setCurrentIORead(vmDiskStat.getIORead()); if (vmDiskStat_lock.getCurrentIOWrite() > vmDiskStat.getIOWrite()) { if (logger.isDebugEnabled()) { - logger.debug("Write # of IO that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() - + " . VM: " + vmDiskStat.getVmName() + " Reported: " + vmDiskStat.getIOWrite() + " Stored: " + vmDiskStat_lock.getCurrentIOWrite()); + logger.debug("Write # of IO that's less than " + + "the last one. Assuming something went wrong and " + + "persisting it. Host: {}. VM: {} Reported: {} Stored: {}", + host, vmDiskStat, vmDiskStat.getIOWrite(), vmDiskStat_lock.getCurrentIOWrite()); } vmDiskStat_lock.setNetIOWrite(vmDiskStat_lock.getNetIOWrite() + vmDiskStat_lock.getCurrentIOWrite()); } vmDiskStat_lock.setCurrentIOWrite(vmDiskStat.getIOWrite()); if (vmDiskStat_lock.getCurrentBytesRead() > vmDiskStat.getBytesRead()) { if (logger.isDebugEnabled()) { - logger.debug("Read # of Bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() - + " . VM: " + vmDiskStat.getVmName() + " Reported: " + toHumanReadableSize(vmDiskStat.getBytesRead()) + " Stored: " + toHumanReadableSize(vmDiskStat_lock.getCurrentBytesRead())); + logger.debug("Read # of Bytes that's less " + + "than the last one. Assuming something went wrong and" + + " persisting it. Host: {} . 
VM: {} Reported: {} Stored: {}", + host, vmDiskStat, toHumanReadableSize(vmDiskStat.getBytesRead()), + toHumanReadableSize(vmDiskStat_lock.getCurrentBytesRead())); } vmDiskStat_lock.setNetBytesRead(vmDiskStat_lock.getNetBytesRead() + vmDiskStat_lock.getCurrentBytesRead()); } vmDiskStat_lock.setCurrentBytesRead(vmDiskStat.getBytesRead()); if (vmDiskStat_lock.getCurrentBytesWrite() > vmDiskStat.getBytesWrite()) { if (logger.isDebugEnabled()) { - logger.debug("Write # of Bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() - + " . VM: " + vmDiskStat.getVmName() + " Reported: " + toHumanReadableSize(vmDiskStat.getBytesWrite()) + " Stored: " - + toHumanReadableSize(vmDiskStat_lock.getCurrentBytesWrite())); + logger.debug("Write # of Bytes that's less " + + "than the last one. Assuming something went wrong " + + "and persisting it. Host: {} . VM: {} Reported: {} Stored: {}", + host, vmDiskStat, toHumanReadableSize(vmDiskStat.getBytesWrite()), + toHumanReadableSize(vmDiskStat_lock.getCurrentBytesWrite())); } vmDiskStat_lock.setNetBytesWrite(vmDiskStat_lock.getNetBytesWrite() + vmDiskStat_lock.getCurrentBytesWrite()); } @@ -5920,7 +5929,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } }); } catch (Exception e) { - logger.warn(String.format("Unable to update VM disk statistics for %s from %s", userVm.getInstanceName(), host), e); + logger.warn("Unable to update VM disk statistics for {} from {}", userVm, host, e); } } } @@ -5943,7 +5952,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (vm.getRemoved() != null) { - logger.trace("Vm id=" + vmId + " is already expunged"); + logger.trace("Vm {} is already expunged", vm); return vm; } @@ -6125,7 +6134,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new InvalidParameterValueException("Unable to find disk offering " + diskOfferingId); } if 
(diskOffering.isComputeOnly()) { - throw new InvalidParameterValueException(String.format("The disk offering id %d provided is directly mapped to a service offering, please provide an individual disk offering", diskOfferingId)); + throw new InvalidParameterValueException(String.format("The disk offering %s provided is directly mapped to a service offering, please provide an individual disk offering", diskOffering)); } } @@ -6210,7 +6219,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir List child_templates = _templateDao.listByParentTemplatetId(templateId); for (VMTemplateVO tmpl: child_templates){ if (tmpl.getFormat() == Storage.ImageFormat.ISO){ - logger.info("MDOV trying to attach disk to the VM " + tmpl.getId() + " vmid=" + vm.getId()); + logger.info("MDOV trying to attach disk {} to the VM {}", tmpl, vm); _tmplService.attachIso(tmpl.getId(), vm.getId(), true); } } @@ -6219,7 +6228,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir String extraConfig = cmd.getExtraConfig(); if (StringUtils.isNotBlank(extraConfig)) { if (EnableAdditionalVmConfig.valueIn(callerId)) { - logger.info("Adding extra configuration to user vm: " + vm.getUuid()); + logger.info("Adding extra configuration to user vm: {}", vm); addExtraConfig(vm, extraConfig); } else { throw new InvalidParameterValueException("attempted setting extraconfig but enable.additional.vm.configuration is disabled"); @@ -6710,7 +6719,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw ex; } - checkIfHostOfVMIsInPrepareForMaintenanceState(vm.getHostId(), vmId, "Migrate"); + checkIfHostOfVMIsInPrepareForMaintenanceState(vm, "Migrate"); if(serviceOfferingDetailsDao.findDetail(vm.getServiceOfferingId(), GPU.Keys.pciDevice.toString()) != null) { throw new InvalidParameterValueException("Live Migration of GPU enabled VM is not supported"); @@ -6838,19 +6847,17 @@ public class UserVmManagerImpl extends 
ManagerBase implements UserVmManager, Vir // check max guest vm limit for the destinationHost if (_capacityMgr.checkIfHostReachMaxGuestLimit(destinationHostVO)) { if (logger.isDebugEnabled()) { - logger.debug("Host name: " + destinationHost.getName() + ", hostId: " + destinationHost.getId() - + " already has max Running VMs(count includes system VMs), cannot migrate to this host"); + logger.debug("Host: {} already has max Running VMs(count includes system VMs), cannot migrate to this host", destinationHost); } - throw new VirtualMachineMigrationException("Destination host, hostId: " + destinationHost.getId() - + " already has max Running VMs(count includes system VMs), cannot migrate to this host"); + throw new VirtualMachineMigrationException(String.format("Destination host: %s already has max Running VMs(count includes system VMs), cannot migrate to this host", destinationHost)); } //check if there are any ongoing volume snapshots on the volumes associated with the VM. Long vmId = vm.getId(); - logger.debug("Checking if there are any ongoing snapshots volumes associated with VM with ID " + vmId); - if (checkStatusOfVolumeSnapshots(vmId, null)) { + logger.debug("Checking if there are any ongoing snapshots volumes associated with VM {}", vm); + if (checkStatusOfVolumeSnapshots(vm, null)) { throw new CloudRuntimeException("There is/are unbacked up snapshot(s) on volume(s) attached to this VM, VM Migration is not permitted, please try again later."); } - logger.debug("Found no ongoing snapshots on volumes associated with the vm with id " + vmId); + logger.debug("Found no ongoing snapshots on volumes associated with the vm {}", vm); return dest; } @@ -6878,14 +6885,15 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } - private void checkIfHostOfVMIsInPrepareForMaintenanceState(Long hostId, Long vmId, String operation) { + private void checkIfHostOfVMIsInPrepareForMaintenanceState(VirtualMachine vm, String operation) { + long 
hostId = vm.getHostId(); HostVO host = _hostDao.findById(hostId); if (host.getResourceState() != ResourceState.PrepareForMaintenance) { return; } - logger.debug("Host is in PrepareForMaintenance state - " + operation + " VM operation on the VM id: " + vmId + " is not allowed"); - throw new InvalidParameterValueException(operation + " VM operation on the VM id: " + vmId + " is not allowed as host is preparing for maintenance mode"); + logger.debug("Host is in PrepareForMaintenance state - {} VM operation on the VM: {} is not allowed", operation, vm); + throw new InvalidParameterValueException(String.format("%s VM operation on the VM: %s is not allowed as host is preparing for maintenance mode", operation, vm)); } private Long accountOfDedicatedHost(HostVO host) { @@ -6930,14 +6938,14 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir //if srcHost is explicitly dedicated and destination Host is not if (srcExplDedicated && !destExplDedicated) { //raise an alert - String msg = "VM is being migrated from a explicitly dedicated host " + srcHost.getName() + " to non-dedicated host " + destHost.getName(); + String msg = String.format("VM is being migrated from a explicitly dedicated host %s to non-dedicated host %s", srcHost, destHost); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); logger.warn(msg); } //if srcHost is non dedicated but destination Host is explicitly dedicated if (!srcExplDedicated && destExplDedicated) { //raise an alert - String msg = "VM is being migrated from a non dedicated host " + srcHost.getName() + " to a explicitly dedicated host " + destHost.getName(); + String msg = String.format("VM is being migrated from a non dedicated host %s to a explicitly dedicated host %s", srcHost, destHost); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); logger.warn(msg); } @@ -6945,14 
+6953,14 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir //if hosts are dedicated to different account/domains, raise an alert if (srcExplDedicated && destExplDedicated) { if (!((accountOfDedicatedHost(srcHost) == null) || (accountOfDedicatedHost(srcHost).equals(accountOfDedicatedHost(destHost))))) { - String msg = "VM is being migrated from host " + srcHost.getName() + " explicitly dedicated to account " + accountOfDedicatedHost(srcHost) + " to host " - + destHost.getName() + " explicitly dedicated to account " + accountOfDedicatedHost(destHost); + String msg = String.format("VM is being migrated from host %s explicitly dedicated to account %d to host %s explicitly dedicated to account %d", + srcHost, accountOfDedicatedHost(srcHost), destHost, accountOfDedicatedHost(destHost)); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); logger.warn(msg); } if (!((domainOfDedicatedHost(srcHost) == null) || (domainOfDedicatedHost(srcHost).equals(domainOfDedicatedHost(destHost))))) { - String msg = "VM is being migrated from host " + srcHost.getName() + " explicitly dedicated to domain " + domainOfDedicatedHost(srcHost) + " to host " - + destHost.getName() + " explicitly dedicated to domain " + domainOfDedicatedHost(destHost); + String msg = String.format("VM is being migrated from host %s explicitly dedicated to domain %d to host %s explicitly dedicated to domain %d", + srcHost, domainOfDedicatedHost(srcHost), destHost, domainOfDedicatedHost(destHost)); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); logger.warn(msg); } @@ -6963,7 +6971,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (deployPlanner.getDeploymentPlanner() != null && deployPlanner.getDeploymentPlanner().equals("ImplicitDedicationPlanner")) { //VM is deployed using implicit planner long 
accountOfVm = vm.getAccountId(); - String msg = "VM of account " + accountOfVm + " with implicit deployment planner being migrated to host " + destHost.getName(); + String msg = String.format("VM of account %d with implicit deployment planner being migrated to host %s", accountOfVm, destHost); //Get all vms on destination host boolean emptyDestination = false; List vmsOnDest = getVmsOnHost(destHostId); @@ -6976,8 +6984,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (!isServiceOfferingUsingPlannerInPreferredMode(vm.getServiceOfferingId())) { //Check if all vms on destination host are created using strict implicit mode if (!checkIfAllVmsCreatedInStrictMode(accountOfVm, vmsOnDest)) { - msg = "VM of account " + accountOfVm + " with strict implicit deployment planner being migrated to host " + destHost.getName() - + " not having all vms strict implicitly dedicated to account " + accountOfVm; + msg = String.format("VM of account %d with strict implicit deployment planner being migrated to host %s not having all vms strict implicitly dedicated to account %d", accountOfVm, destHost, accountOfVm); } } else { //If vm is deployed using preferred implicit planner, check if all vms on destination host must be @@ -6985,8 +6992,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir for (VMInstanceVO vmsDest : vmsOnDest) { ServiceOfferingVO destPlanner = serviceOfferingDao.findById(vm.getId(), vmsDest.getServiceOfferingId()); if (!((destPlanner.getDeploymentPlanner() != null && destPlanner.getDeploymentPlanner().equals("ImplicitDedicationPlanner")) && vmsDest.getAccountId() == accountOfVm)) { - msg = "VM of account " + accountOfVm + " with preffered implicit deployment planner being migrated to host " + destHost.getName() - + " not having all vms implicitly dedicated to account " + accountOfVm; + msg = String.format("VM of account %d with preferred implicit deployment planner being migrated to host %s not 
having all vms implicitly dedicated to account %d", accountOfVm, destHost, accountOfVm); } } } @@ -7010,15 +7016,15 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (srcImplDedicated) { if (destImplDedicated) { - msg = "VM is being migrated from implicitly dedicated host " + srcHost.getName() + " to another implicitly dedicated host " + destHost.getName(); + msg = String.format("VM is being migrated from implicitly dedicated host %s to another implicitly dedicated host %s", srcHost, destHost); } else { - msg = "VM is being migrated from implicitly dedicated host " + srcHost.getName() + " to shared host " + destHost.getName(); + msg = String.format("VM is being migrated from implicitly dedicated host %s to shared host %s", srcHost, destHost); } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); logger.warn(msg); } else { if (destImplDedicated) { - msg = "VM is being migrated from shared host " + srcHost.getName() + " to implicitly dedicated host " + destHost.getName(); + msg = String.format("VM is being migrated from shared host %s to implicitly dedicated host %s", srcHost, destHost); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); logger.warn(msg); } @@ -7061,11 +7067,13 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } for (VMInstanceVO vm : allVmsOnHost) { if (!isImplicitPlannerUsedByOffering(vm.getServiceOfferingId()) || vm.getAccountId() != accountId) { - logger.info("Host " + vm.getHostId() + " found to be running a vm created by a planner other" + " than implicit, or running vms of other account"); + logger.info("Host {} for VM {} found to be running a vm created by a planner other than implicit, or running vms of other account", + _hostDao.findById(vm.getHostId()), vm); createdByImplicitStrict = false; break; } else if 
(isServiceOfferingUsingPlannerInPreferredMode(vm.getServiceOfferingId()) || vm.getAccountId() != accountId) { - logger.info("Host " + vm.getHostId() + " found to be running a vm created by an implicit planner" + " in preferred mode, or running vms of other account"); + logger.info("Host {} for VM {} found to be running a vm created by an implicit planner in preferred mode, or running vms of other account", + _hostDao.findById(vm.getHostId()), vm); createdByImplicitStrict = false; break; } @@ -7433,7 +7441,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // make sure the accounts are not same if (oldAccount.getAccountId() == newAccount.getAccountId()) { - throw new InvalidParameterValueException("The new account is the same as the old account. Account id =" + oldAccount.getAccountId()); + throw new InvalidParameterValueException("The new account is the same as the old account. Account: " + oldAccount); } // don't allow to move the vm if there are existing PF/LB/Static Nat @@ -7547,7 +7555,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new InvalidParameterValueException("Can't move vm with network Ids; this is a basic zone VM"); } // cleanup the old security groups - _securityGroupMgr.removeInstanceFromGroups(cmd.getVmId()); + _securityGroupMgr.removeInstanceFromGroups(vm); // cleanup the network for the oldOwner _networkMgr.cleanupNics(vmOldProfile); _networkMgr.removeNics(vmOldProfile); @@ -7608,7 +7616,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vmi); _networkMgr.allocate(vmProfile, networks, null); - _securityGroupMgr.addInstanceToGroups(vm.getId(), securityGroupIdList); + _securityGroupMgr.addInstanceToGroups(vm, securityGroupIdList); int securityIdList = securityGroupIdList != null ? 
securityGroupIdList.size() : 0; logger.debug("AssignVM: Basic zone, adding security groups no " + securityIdList + " to " + vm.getInstanceName()); @@ -7618,7 +7626,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir Map requestedIPv6ForNics = new HashMap<>(); if (_networkModel.checkSecurityGroupSupportForNetwork(newAccount, zone, networkIdList, securityGroupIdList)) { // advanced zone with security groups // cleanup the old security groups - _securityGroupMgr.removeInstanceFromGroups(cmd.getVmId()); + _securityGroupMgr.removeInstanceFromGroups(vm); // if networkIdList is null and the first network of vm is shared network, then keep it if possible if (networkIdList == null || networkIdList.isEmpty()) { NicVO defaultNicOld = _nicDao.findDefaultNicForVM(vm.getId()); @@ -7628,7 +7636,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir applicableNetworks.add(defaultNetworkOld); requestedIPv4ForNics.put(defaultNetworkOld.getId(), defaultNicOld.getIPv4Address()); requestedIPv6ForNics.put(defaultNetworkOld.getId(), defaultNicOld.getIPv6Address()); - logger.debug("AssignVM: use old shared network " + defaultNetworkOld.getName() + " with old ip " + defaultNicOld.getIPv4Address() + " on default nic of vm:" + vm.getInstanceName()); + logger.debug("AssignVM: use old shared network {} with old ip {} on default nic of vm: {}", defaultNetworkOld, defaultNicOld.getIPv4Address(), vm); } } } @@ -7660,10 +7668,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (nicOld != null) { requestedIPv4ForNics.put(network.getId(), nicOld.getIPv4Address()); requestedIPv6ForNics.put(network.getId(), nicOld.getIPv6Address()); - logger.debug("AssignVM: use old shared network " + network.getName() + " with old ip " + nicOld.getIPv4Address() + " on nic of vm:" + vm.getInstanceName()); + logger.debug("AssignVM: use old shared network {} with old ip {} on nic of vm: {}", network, 
nicOld.getIPv4Address(), vm); } } - logger.debug("AssignVM: Added network " + network.getName() + " to vm " + vm.getId()); + logger.debug("AssignVM: Added network {} to vm {}", network, vm); applicableNetworks.add(network); } } @@ -7738,11 +7746,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir _networkMgr.allocate(vmProfile, networks, null); } - _securityGroupMgr.addInstanceToGroups(vm.getId(), - securityGroupIdList); - logger.debug("AssignVM: Advanced zone, adding security groups no " - + securityGroupIdList.size() + " to " - + vm.getInstanceName()); + _securityGroupMgr.addInstanceToGroups(vm, securityGroupIdList); + logger.debug("AssignVM: Advanced zone, adding security groups no {} to {}", securityGroupIdList.size(), vm); } else { if (securityGroupIdList != null && !securityGroupIdList.isEmpty()) { @@ -7757,7 +7762,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir applicableNetworks.add(defaultNetworkOld); requestedIPv4ForNics.put(defaultNetworkOld.getId(), defaultNicOld.getIPv4Address()); requestedIPv6ForNics.put(defaultNetworkOld.getId(), defaultNicOld.getIPv6Address()); - logger.debug("AssignVM: use old shared network " + defaultNetworkOld.getName() + " with old ip " + defaultNicOld.getIPv4Address() + " on default nic of vm:" + vm.getInstanceName()); + logger.debug("AssignVM: use old shared network {} with old ip {} on default nic of vm: {}", defaultNetworkOld, defaultNicOld.getIPv4Address(), vm); } } } @@ -7787,10 +7792,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (nicOld != null) { requestedIPv4ForNics.put(network.getId(), nicOld.getIPv4Address()); requestedIPv6ForNics.put(network.getId(), nicOld.getIPv6Address()); - logger.debug("AssignVM: use old shared network " + network.getName() + " with old ip " + nicOld.getIPv4Address() + " on nic of vm:" + vm.getInstanceName()); + logger.debug("AssignVM: use old shared network {} with old ip {} on 
nic of vm: {}", network, nicOld.getIPv4Address(), vm); } } - logger.debug("AssignVM: Added network " + network.getName() + " to vm " + vm.getId()); + logger.debug("AssignVM: Added network {} to vm {}", network, vm); applicableNetworks.add(network); } } else if (applicableNetworks.isEmpty()) { @@ -7812,8 +7817,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new InvalidParameterValueException("Unable to find physical network with id: " + physicalNetworkId + " and tag: " + requiredOfferings.get(0).getTags()); } - logger.debug("Creating network for account " + newAccount + " from the network offering id=" + requiredOfferings.get(0).getId() - + " as a part of deployVM process"); + logger.debug("Creating network for account {} from the network offering {} as a part of deployVM process", newAccount, requiredOfferings.get(0)); Network newNetwork = _networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), newAccount.getAccountName() + "-network", newAccount.getAccountName() + "-network", null, null, null, false, null, newAccount, null, physicalNetwork, zone.getId(), ACLType.Account, null, null, @@ -7848,7 +7852,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir defaultNetwork = _networkDao.findById(virtualNetworks.get(0).getId()); } } else { - throw new InvalidParameterValueException("Required network offering id=" + requiredOfferings.get(0).getId() + " is not in " + NetworkOffering.State.Enabled); + throw new InvalidParameterValueException(String.format("Required network offering %s is not in %s", requiredOfferings.get(0), NetworkOffering.State.Enabled)); } applicableNetworks.add(defaultNetwork); @@ -7874,10 +7878,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir VirtualMachine vmi = _itMgr.findById(vm.getId()); VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vmi); _networkMgr.allocate(vmProfile, networks, null); - 
logger.debug("AssignVM: Advance virtual, adding networks no " + networks.size() + " to " + vm.getInstanceName()); + logger.debug("AssignVM: Advance virtual, adding networks no {} to {}", networks.size(), vm); } // END IF NON SEC GRP ENABLED } // END IF ADVANCED - logger.info("AssignVM: vm " + vm.getInstanceName() + " now belongs to account " + newAccount.getAccountName()); + logger.info("AssignVM: vm {} now belongs to account {}", vm, newAccount); return vm; } @@ -7889,7 +7893,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir _networkModel.checkNetworkPermissions(newAccount, network); return true; } catch (PermissionDeniedException e) { - logger.debug(String.format("AssignVM: %s network %s can not be used by new account %s", network.getGuestType(), network.getName(), newAccount.getAccountName())); + logger.debug("AssignVM: {} network {} can not be used by new account {}", network.getGuestType(), network, newAccount); return false; } } @@ -7960,11 +7964,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } //check if there are any active snapshots on volumes associated with the VM - logger.debug("Checking if there are any ongoing snapshots on the ROOT volumes associated with VM with ID " + vmId); - if (checkStatusOfVolumeSnapshots(vmId, Volume.Type.ROOT)) { + logger.debug("Checking if there are any ongoing snapshots on the ROOT volumes associated with VM {}", vm); + if (checkStatusOfVolumeSnapshots(vm, Volume.Type.ROOT)) { throw new CloudRuntimeException("There is/are unbacked up snapshot(s) on ROOT volume, Re-install VM is not permitted, please try again later."); } - logger.debug("Found no ongoing snapshots on volume of type ROOT, for the vm with id " + vmId); + logger.debug("Found no ongoing snapshots on volume of type ROOT, for the vm {}", vm); return restoreVMInternal(caller, vm, newTemplateId, rootDiskOfferingId, expunge, details); } @@ -8034,7 +8038,7 @@ public class UserVmManagerImpl 
extends ManagerBase implements UserVmManager, Vir } if (owner.getState() == Account.State.DISABLED) { - throw new PermissionDeniedException("The owner of " + vm + " is disabled: " + vm.getAccountId()); + throw new PermissionDeniedException(String.format("The owner of %s is disabled: %s", vm, owner)); } if (vm.getState() != VirtualMachine.State.Running && vm.getState() != VirtualMachine.State.Stopped) { @@ -8069,7 +8073,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir try { checkRestoreVmFromTemplate(vm, template, rootVols, diskOffering, details); } catch (ResourceAllocationException e) { - logger.error("Failed to restore VM " + vm.getUuid() + " due to " + e.getMessage(), e); + logger.error("Failed to restore VM {} due to {}", vm, e.getMessage(), e); throw new CloudRuntimeException("Failed to restore VM " + vm.getUuid() + " due to " + e.getMessage(), e); } @@ -8077,7 +8081,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir try { _itMgr.stop(vm.getUuid()); } catch (ResourceUnavailableException e) { - logger.debug("Stop vm " + vm.getUuid() + " failed", e); + logger.debug("Stop vm {} failed", vm, e); CloudRuntimeException ex = new CloudRuntimeException("Stop vm failed for specified vmId"); ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; @@ -8128,7 +8132,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } catch (final CloudRuntimeException e) { throw e; } catch (final Exception e) { - logger.error("Unable to restore VM " + userVm.getUuid(), e); + logger.error("Unable to restore VM {}", userVm, e); throw new CloudRuntimeException(e); } @@ -8160,12 +8164,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (vm.getHypervisorType() == HypervisorType.VMware) { VolumeInfo volumeInStorage = volFactory.getVolume(root.getId()); if (volumeInStorage != null) { - logger.info("Expunging volume " + root.getId() + " from primary data 
store"); + logger.info("Expunging volume {} from primary data store", root); AsyncCallFuture future = _volService.expungeVolumeAsync(volFactory.getVolume(root.getId())); try { future.get(); } catch (Exception e) { - logger.debug("Failed to expunge volume:" + root.getId(), e); + logger.debug("Failed to expunge volume: {}", root, e); } } } @@ -8212,7 +8216,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } - logger.debug("Restore VM " + vmId + " done successfully"); + logger.debug("Restore VM {} done successfully", vm); return vm; } @@ -8416,7 +8420,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (!cmds.isSuccessful()) { for (Answer answer : cmds.getAnswers()) { if (!answer.getResult()) { - logger.warn("Failed to reset vm due to: " + answer.getDetails()); + logger.warn("Failed to reset vm {} due to: {}", vm, answer.getDetails()); throw new CloudRuntimeException("Unable to reset " + vm + " due to " + answer.getDetails()); } @@ -8457,12 +8461,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir cmd.setAdd(false); cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC); - sendModifyTargetsCommand(cmd, hostId); + sendModifyTargetsCommand(cmd, host); } } - private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { - Answer answer = _agentMgr.easySend(hostId, cmd); + private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, HostVO host) { + Answer answer = _agentMgr.easySend(host.getId(), cmd); if (answer == null) { String msg = "Unable to get an answer to the modify targets command"; @@ -8470,7 +8474,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir logger.warn(msg); } else if (!answer.getResult()) { - String msg = "Unable to modify target on the following host: " + hostId; + String msg = String.format("Unable to modify target on the following host: %s", host); logger.warn(msg); } 
@@ -8509,7 +8513,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir vm.setDetail(VmDetailConstants.ROOT_DISK_CONTROLLER, rootDiskController); _vmDao.saveDetails(vm); if (logger.isDebugEnabled()) { - logger.debug("Persisted device bus information rootDiskController=" + rootDiskController + " for vm: " + vm.getDisplayName()); + logger.debug("Persisted device bus information rootDiskController={} for vm: {}", rootDiskController, vm); } } } @@ -8548,7 +8552,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir return true; } - private boolean checkStatusOfVolumeSnapshots(long vmId, Volume.Type type) { + private boolean checkStatusOfVolumeSnapshots(VirtualMachine vm, Volume.Type type) { + long vmId = vm.getId(); List listVolumes = null; if (type == Volume.Type.ROOT) { listVolumes = _volsDao.findByInstanceAndType(vmId, type); @@ -8557,13 +8562,13 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } else { listVolumes = _volsDao.findByInstance(vmId); } - logger.debug("Found "+listVolumes.size()+" no. of volumes of type "+type+" for vm with VM ID "+vmId); + logger.debug("Found {} no. 
of volumes of type {} for vm with VM ID {}", listVolumes.size(), type, vm); for (VolumeVO volume : listVolumes) { Long volumeId = volume.getId(); - logger.debug("Checking status of snapshots for Volume with Volume Id: "+volumeId); + logger.debug("Checking status of snapshots for Volume: {}", volume); List ongoingSnapshots = _snapshotDao.listByStatus(volumeId, Snapshot.State.Creating, Snapshot.State.CreatedOnPrimary, Snapshot.State.BackingUp); int ongoingSnapshotsCount = ongoingSnapshots.size(); - logger.debug("The count of ongoing Snapshots for VM with ID "+vmId+" and disk type "+type+" is "+ongoingSnapshotsCount); + logger.debug("The count of ongoing Snapshots for VM {} and disk type {} is {}", vm, type, ongoingSnapshotsCount); if (ongoingSnapshotsCount > 0) { logger.debug("Found "+ongoingSnapshotsCount+" no. of snapshots, on volume of type "+type+", which snapshots are not yet backed up"); return true; @@ -8601,7 +8606,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } - private void detachVolumesFromVm(List volumes) { + private void detachVolumesFromVm(UserVm vm, List volumes) { for (VolumeVO volume : volumes) { // Create new context and inject correct event resource type, id and details, @@ -8620,7 +8625,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (detachResult == null) { - logger.error("DestroyVM remove volume - failed to detach and delete volume " + volume.getInstanceId() + " from instance " + volume.getId()); + logger.error("DestroyVM remove volume - failed to detach and delete volume {} from instance {}", volume, vm); } } } @@ -8643,7 +8648,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir Volume result = _volumeService.destroyVolume(volume.getId(), CallContext.current().getCallingAccount(), expunge, false); if (result == null) { - logger.error(String.format("DestroyVM remove volume - failed to delete volume %s from instance %s", 
volume.getId(), volume.getInstanceId())); + logger.error("DestroyVM remove volume - failed to delete volume {} from instance {}", volume, vm); } } finally { // Remove volumeContext and pop vmContext back @@ -8692,7 +8697,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir boolean result; try { if (vm.getState() != State.Running && vm.getState() != State.Stopped) { - logger.debug("VM ID = " + vmId + " is not running or stopped, cannot be unmanaged"); + logger.debug("VM {} is not running or stopped, cannot be unmanaged", vm); return false; } @@ -8707,14 +8712,14 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir result = _itMgr.unmanage(vm.getUuid()); if (result) { - cleanupUnmanageVMResources(vm.getId()); + cleanupUnmanageVMResources(vm); unmanageVMFromDB(vm.getId()); publishUnmanageVMUsageEvents(vm, volumes); } else { throw new CloudRuntimeException("Error while unmanaging VM: " + vm.getUuid()); } } catch (Exception e) { - logger.error("Could not unmanage VM " + vm.getUuid(), e); + logger.error("Could not unmanage VM {}", vm, e); throw new CloudRuntimeException(e); } finally { _vmDao.releaseFromLockTable(vm.getId()); @@ -8734,9 +8739,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir /* Cleanup the VM from resources and groups */ - private void cleanupUnmanageVMResources(long vmId) { - cleanupVmResources(vmId); - removeVMFromAffinityGroups(vmId); + private void cleanupUnmanageVMResources(UserVmVO vm) { + cleanupVmResources(vm); + removeVMFromAffinityGroups(vm.getId()); } private void unmanageVMFromDB(long vmId) { @@ -8802,21 +8807,19 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } private void checkUnmanagingVMOngoingVolumeSnapshots(UserVmVO vm) { - logger.debug("Checking if there are any ongoing snapshots on the ROOT volumes associated with VM with ID " + vm.getId()); - if (checkStatusOfVolumeSnapshots(vm.getId(), 
Volume.Type.ROOT)) { + logger.debug("Checking if there are any ongoing snapshots on the ROOT volumes associated with VM {}", vm); + if (checkStatusOfVolumeSnapshots(vm, Volume.Type.ROOT)) { throw new CloudRuntimeException("There is/are unbacked up snapshot(s) on ROOT volume, vm unmanage is not permitted, please try again later."); } - logger.debug("Found no ongoing snapshots on volume of type ROOT, for the vm with id " + vm.getId()); + logger.debug("Found no ongoing snapshots on volume of type ROOT, for the vm {}", vm); } private void checkUnmanagingVMVolumes(UserVmVO vm, List volumes) { for (VolumeVO volume : volumes) { if (volume.getInstanceId() == null || !volume.getInstanceId().equals(vm.getId())) { - throw new CloudRuntimeException("Invalid state for volume with ID " + volume.getId() + " of VM " + - vm.getId() +": it is not attached to VM"); + throw new CloudRuntimeException(String.format("Invalid state for volume %s of VM %s: it is not attached to VM", volume, vm)); } else if (volume.getVolumeType() != Volume.Type.ROOT && volume.getVolumeType() != Volume.Type.DATADISK) { - throw new CloudRuntimeException("Invalid type for volume with ID " + volume.getId() + - ": ROOT or DATADISK expected but got " + volume.getVolumeType()); + throw new CloudRuntimeException(String.format("Invalid type for volume %s: ROOT or DATADISK expected but got %s", volume, volume.getVolumeType())); } } } @@ -8864,7 +8867,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (uservm != null) { collectVmDiskAndNetworkStatistics(uservm, expectedState); } else { - logger.info(String.format("Skip collecting vm %s disk and network statistics as it is not user vm", uservm)); + logger.info("Skip collecting vmId {} disk and network statistics as it is not user vm", vmId); } } diff --git a/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java b/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java index 2061367cf4d..8d43875190f 
100644 --- a/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java @@ -428,7 +428,7 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme return createAndPersistVMSnapshot(userVmVo, vsDescription, vmSnapshotName, vsDisplayName, vmSnapshotType); } catch (Exception e) { String msg = e.getMessage(); - logger.error("Create vm snapshot record failed for vm: " + vmId + " due to: " + msg); + logger.error("Create vm snapshot record failed for vm: " + userVmVo + " due to: " + msg); } return null; } @@ -494,7 +494,7 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme VMSnapshotStrategy snapshotStrategy = storageStrategyFactory.getVmSnapshotStrategy(vmSnapshot); if (snapshotStrategy == null) { - throw new CloudRuntimeException("can't find vm snapshot strategy for vmsnapshot: " + vmSnapshot.getId()); + throw new CloudRuntimeException(String.format("can't find vm snapshot strategy for vmsnapshot: %s", vmSnapshot)); } return snapshotStrategy; @@ -572,7 +572,7 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme VolumeVO rootVolume = volumeVos.get(0); if(!rootVolume.getState().equals(Volume.State.Ready)) { - throw new CloudRuntimeException("Create vm to snapshot failed due to vm: " + vmId + " has root disk in " + rootVolume.getState() + " state"); + throw new CloudRuntimeException("Create vm to snapshot failed due to vm: " + userVm + " has root disk in " + rootVolume.getState() + " state"); } VMSnapshotVO vmSnapshot = _vmSnapshotDao.findById(vmSnapshotId); @@ -587,7 +587,7 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme VMSnapshot snapshot = strategy.takeVMSnapshot(vmSnapshot); return snapshot; } catch (Exception e) { - String errMsg = String.format("Failed to create vm snapshot: [%s] due to: %s", vmSnapshotId, e.getMessage()); + String errMsg = 
String.format("Failed to create vm snapshot: [%s] due to: %s", vmSnapshot, e.getMessage()); logger.debug(errMsg, e); throw new CloudRuntimeException(errMsg, e); } @@ -618,14 +618,14 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme // check VM snapshot states, only allow to delete vm snapshots in created and error state if (VMSnapshot.State.Ready != vmSnapshot.getState() && VMSnapshot.State.Expunging != vmSnapshot.getState() && VMSnapshot.State.Error != vmSnapshot.getState()) { - throw new InvalidParameterValueException("Can't delete the vm snapshotshot " + vmSnapshotId + " due to it is not in Created or Error, or Expunging State"); + throw new InvalidParameterValueException(String.format("Can't delete the vm snapshotshot %s due to it is not in Created or Error, or Expunging State", vmSnapshot)); } // check if there are other active VM snapshot tasks if (hasActiveVMSnapshotTasks(vmSnapshot.getVmId())) { List expungingSnapshots = _vmSnapshotDao.listByInstanceId(vmSnapshot.getVmId(), VMSnapshot.State.Expunging); if (expungingSnapshots.size() > 0 && expungingSnapshots.get(0).getId() == vmSnapshot.getId()) - logger.debug("Target VM snapshot already in expunging state, go on deleting it: " + vmSnapshot.getDisplayName()); + logger.debug("Target VM snapshot already in expunging state, go on deleting it: {}", vmSnapshot); else throw new InvalidParameterValueException("There is other active vm snapshot tasks on the instance, please try again later"); } @@ -683,14 +683,14 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme List validStates = Arrays.asList(VMSnapshot.State.Ready, VMSnapshot.State.Expunging, VMSnapshot.State.Error, VMSnapshot.State.Allocated); // check VM snapshot states, only allow to delete vm snapshots in ready, expunging, allocated and error state if (!validStates.contains(vmSnapshot.getState())) { - throw new InvalidParameterValueException("Can't delete the vm snapshot " + vmSnapshotId + " 
due to it is not in " + validStates.toString() + "States"); + throw new InvalidParameterValueException(String.format("Can't delete the vm snapshot %s due to it is not in %sStates", vmSnapshot, validStates.toString())); } // check if there are other active VM snapshot tasks if (hasActiveVMSnapshotTasks(vmSnapshot.getVmId())) { List expungingSnapshots = _vmSnapshotDao.listByInstanceId(vmSnapshot.getVmId(), VMSnapshot.State.Expunging); if (expungingSnapshots.size() > 0 && expungingSnapshots.get(0).getId() == vmSnapshot.getId()) - logger.debug("Target VM snapshot already in expunging state, go on deleting it: " + vmSnapshot.getDisplayName()); + logger.debug("Target VM snapshot already in expunging state, go on deleting it: {}", vmSnapshot); else throw new InvalidParameterValueException("There is other active vm snapshot tasks on the instance, please try again later"); } @@ -703,7 +703,7 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme VMSnapshotStrategy strategy = findVMSnapshotStrategy(vmSnapshot); return strategy.deleteVMSnapshot(vmSnapshot); } catch (Exception e) { - logger.debug("Failed to delete vm snapshot: " + vmSnapshotId, e); + logger.debug("Failed to delete vm snapshot: {}", vmSnapshot, e); return false; } } @@ -722,7 +722,7 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme UserVmVO userVm = _userVMDao.findById(vmId); // check if VM exists if (userVm == null) { - throw new InvalidParameterValueException("Revert vm to snapshot: " + vmSnapshotId + " failed due to vm: " + vmId + " is not found"); + throw new InvalidParameterValueException(String.format("Revert vm to snapshot: %s failed due to vm: %d is not found", vmSnapshotVo, vmId)); } // check if there are other active VM snapshot tasks @@ -828,11 +828,11 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme */ protected void changeUserVmServiceOffering(UserVm userVm, VMSnapshotVO vmSnapshotVo) { Map vmDetails = 
getVmMapDetails(userVm); - boolean result = upgradeUserVmServiceOffering(userVm.getId(), vmSnapshotVo.getServiceOfferingId(), vmDetails); + boolean result = upgradeUserVmServiceOffering(userVm, vmSnapshotVo.getServiceOfferingId(), vmDetails); if (! result){ throw new CloudRuntimeException("VM Snapshot reverting failed due to vm service offering couldn't be changed to the one used when snapshot was taken"); } - logger.debug("Successfully changed service offering to " + vmSnapshotVo.getServiceOfferingId() + " for vm " + userVm.getId()); + logger.debug("Successfully changed service offering to {} for vm {}", _serviceOfferingDao.findById(vmSnapshotVo.getServiceOfferingId()), userVm); } /** @@ -842,16 +842,16 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme * @param details vm details * @return if operation was successful */ - protected boolean upgradeUserVmServiceOffering(Long vmId, Long serviceOfferingId, Map details) { + protected boolean upgradeUserVmServiceOffering(UserVm vm, Long serviceOfferingId, Map details) { boolean result; try { - result = _userVmManager.upgradeVirtualMachine(vmId, serviceOfferingId, details); + result = _userVmManager.upgradeVirtualMachine(vm.getId(), serviceOfferingId, details); if (! 
result){ - logger.error("Couldn't change service offering for vm " + vmId + " to " + serviceOfferingId); + logger.error("Couldn't change service offering for vm {} to {}", vm, _serviceOfferingDao.findById(serviceOfferingId)); } return result; } catch (ConcurrentOperationException | ResourceUnavailableException | ManagementServerException | VirtualMachineMigrationException e) { - logger.error("Couldn't change service offering for vm " + vmId + " to " + serviceOfferingId + " due to: " + e.getMessage()); + logger.error("Couldn't change service offering for vm {} to {} due to: {}", vm, _serviceOfferingDao.findById(serviceOfferingId), e.getMessage()); return false; } } @@ -868,9 +868,7 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme final UserVmVO userVm = _userVMDao.findById(vmId); // check if VM exists if (userVm == null) { - throw new InvalidParameterValueException("Revert vm to snapshot: " - + vmSnapshotId + " failed due to vm: " + vmId - + " is not found"); + throw new InvalidParameterValueException(String.format("Revert vm to snapshot: %s failed due to vm: %d is not found", vmSnapshotVo, vmId)); } // check if there are other active VM snapshot tasks @@ -901,7 +899,7 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme vm = _userVMDao.findById(userVm.getId()); hostId = vm.getHostId(); } catch (Exception e) { - logger.error("Start VM " + userVm.getInstanceName() + " before reverting failed due to " + e.getMessage()); + logger.error("Start VM {} before reverting failed due to {}", userVm, e.getMessage()); throw new CloudRuntimeException(e.getMessage()); } } else { @@ -909,7 +907,7 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme try { _itMgr.advanceStop(userVm.getUuid(), true); } catch (Exception e) { - logger.error("Stop VM " + userVm.getInstanceName() + " before reverting failed due to " + e.getMessage()); + logger.error("Stop VM {} before reverting failed due to 
{}", userVm, e.getMessage()); throw new CloudRuntimeException(e.getMessage()); } } @@ -932,7 +930,7 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme }); return userVm; } catch (Exception e) { - logger.debug("Failed to revert vmsnapshot: " + vmSnapshotId, e); + logger.debug("Failed to revert vmsnapshot: {}", vmSnapshotVo, e); throw new CloudRuntimeException(e.getMessage()); } } @@ -1372,12 +1370,12 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme try { VMSnapshotStrategy strategy = findVMSnapshotStrategy(snapshot); if (! strategy.deleteVMSnapshotFromDB(snapshot, unmanage)) { - logger.error("Couldn't delete vm snapshot with id " + snapshot.getId()); + logger.error("Couldn't delete vm snapshot {}", snapshot); return false; } } catch (CloudRuntimeException e) { - logger.error("Couldn't delete vm snapshot due to: " + e.getMessage()); + logger.error("Couldn't delete vm snapshot {} due to: {}", snapshot, e.getMessage()); } } return true; diff --git a/server/src/main/java/org/apache/cloudstack/acl/ProjectRoleManagerImpl.java b/server/src/main/java/org/apache/cloudstack/acl/ProjectRoleManagerImpl.java index 01fc96473d2..91bbb349a07 100644 --- a/server/src/main/java/org/apache/cloudstack/acl/ProjectRoleManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/acl/ProjectRoleManagerImpl.java @@ -160,7 +160,7 @@ public class ProjectRoleManagerImpl extends ManagerBase implements ProjectRoleSe return null; } if (!(role.getProjectId().equals(projectId))) { - logger.warn(String.format("Project role : %s doesn't belong to the project" + role.getName())); + logger.warn("Project role: {} doesn't belong to the project", role); return null; } return role; diff --git a/server/src/main/java/org/apache/cloudstack/acl/RoleManagerImpl.java b/server/src/main/java/org/apache/cloudstack/acl/RoleManagerImpl.java index 60e7093c48b..87b119542c5 100644 --- 
a/server/src/main/java/org/apache/cloudstack/acl/RoleManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/acl/RoleManagerImpl.java @@ -107,7 +107,7 @@ public class RoleManagerImpl extends ManagerBase implements RoleService, Configu return null; } if (!isCallerRootAdmin() && (RoleType.Admin == role.getRoleType() || (!role.isPublicRole() && ignorePrivateRoles))) { - logger.debug(String.format("Role [id=%s, name=%s] is either of 'Admin' type or is private and is only visible to 'Root admins'.", id, role.getName())); + logger.debug("Role [{}] is either of 'Admin' type or is private and is only visible to 'Root admins'.", role); return null; } return role; @@ -128,7 +128,7 @@ public class RoleManagerImpl extends ManagerBase implements RoleService, Configu } for (Role role : roles) { if (!isCallerRootAdmin() && (RoleType.Admin == role.getRoleType() || (!role.isPublicRole() && ignorePrivateRoles))) { - logger.debug(String.format("Role [id=%s, name=%s] is either of 'Admin' type or is private and is only visible to 'Root admins'.", role.getId(), role.getName())); + logger.debug("Role [{}] is either of 'Admin' type or is private and is only visible to 'Root admins'.", role); continue; } result.add(role); diff --git a/server/src/main/java/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java b/server/src/main/java/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java index 0ec16f1e748..d098ef25652 100644 --- a/server/src/main/java/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java @@ -160,7 +160,7 @@ public class AffinityGroupServiceImpl extends ManagerBase implements AffinityGro AffinityGroupVO group = createAffinityGroup(processor, owner, aclType, affinityGroupName, affinityGroupType, description, domainLevel, domainId); if (logger.isDebugEnabled()) { - logger.debug("Created affinity group =" + affinityGroupName); + logger.debug("Created affinity 
group {}", group); } CallContext.current().putContextParameter(AffinityGroup.class, group.getUuid()); @@ -260,7 +260,7 @@ public class AffinityGroupServiceImpl extends ManagerBase implements AffinityGro _messageBus.publish(_name, EntityManager.MESSAGE_REMOVE_ENTITY_EVENT, PublishScope.LOCAL, params); if (logger.isDebugEnabled()) { - logger.debug("Deleted affinity group id=" + affinityGroupIdFinal); + logger.debug("Deleted affinity group {}", group); } return true; } @@ -445,12 +445,14 @@ public class AffinityGroupServiceImpl extends ManagerBase implements AffinityGro Account caller = CallContext.current().getCallingAccount(); Account owner = _accountMgr.getAccount(vmInstance.getAccountId()); + List affinityGroupList = new ArrayList<>(); // check that the affinity groups exist for (Long affinityGroupId : affinityGroupIds) { AffinityGroupVO ag = _affinityGroupDao.findById(affinityGroupId); if (ag == null) { throw new InvalidParameterValueException("Unable to find affinity group by id " + affinityGroupId); } else { + affinityGroupList.add(ag); // verify permissions if (ag.getAclType() == ACLType.Domain) { _accountMgr.checkAccess(caller, null, false, owner, ag); @@ -475,7 +477,7 @@ public class AffinityGroupServiceImpl extends ManagerBase implements AffinityGro } _affinityGroupVMMapDao.updateMap(vmId, affinityGroupIds); if (logger.isDebugEnabled()) { - logger.debug("Updated VM :" + vmId + " affinity groups to =" + affinityGroupIds); + logger.debug("Updated VM {} affinity groups to {}", vmInstance, affinityGroupList); } // APIResponseHelper will pull out the updated affinitygroups. 
return vmInstance; diff --git a/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java b/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java index 8b05a76d0a9..97e503974cf 100644 --- a/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java @@ -164,7 +164,7 @@ public class IndirectAgentLBServiceImpl extends ComponentLifecycleBase implement // would be {ResourceState.Creating, ResourceState.Error}; if (!allowedStates.contains(host.getResourceState())) { if (logger.isTraceEnabled()) { - logger.trace(String.format("host is in '%s' state, not adding to the host list, (id = %s)", host.getResourceState(), host.getUuid())); + logger.trace("host ({}) is in '{}' state, not adding to the host list", host, host.getResourceState()); } return; } @@ -174,7 +174,7 @@ public class IndirectAgentLBServiceImpl extends ComponentLifecycleBase implement && host.getType() != Host.Type.SecondaryStorage && host.getType() != Host.Type.SecondaryStorageVM) { if (logger.isTraceEnabled()) { - logger.trace(String.format("host is of wrong type, not adding to the host list, (id = %s, type = %s)", host.getUuid(), host.getType())); + logger.trace(String.format("host (%s) is of wrong type, not adding to the host list, type = %s", host, host.getType())); } return; } @@ -183,7 +183,7 @@ public class IndirectAgentLBServiceImpl extends ComponentLifecycleBase implement && ! 
(host.getHypervisorType() == Hypervisor.HypervisorType.KVM || host.getHypervisorType() == Hypervisor.HypervisorType.LXC)) { if (logger.isTraceEnabled()) { - logger.trace(String.format("hypervisor is not the right type, not adding to the host list, (id = %s, hypervisortype = %s)", host.getUuid(), host.getHypervisorType())); + logger.trace(String.format("hypervisor is not the right type, not adding to the host list, (host: %s, hypervisortype: %s)", host, host.getHypervisorType())); } return; } diff --git a/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java b/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java index 6e13ba135df..2e52d1ccc44 100644 --- a/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java @@ -191,7 +191,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { throw new PermissionDeniedException("Parameter external can only be specified by a Root Admin, permission denied"); } final BackupProvider backupProvider = getBackupProvider(zoneId); - logger.debug("Listing external backup offerings for the backup provider configured for zone ID " + zoneId); + logger.debug("Listing external backup offerings for the backup provider configured for zone {}", dataCenterDao.findById(zoneId)); return backupProvider.listBackupOfferings(zoneId); } @@ -403,8 +403,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { result = true; } } catch (Exception e) { - logger.error(String.format("Exception caught when trying to remove VM [uuid: %s, name: %s] from the backup offering [uuid: %s, name: %s] due to: [%s].", - vm.getUuid(), vm.getInstanceName(), offering.getUuid(), offering.getName(), e.getMessage()), e); + logger.error("Exception caught when trying to remove VM [{}] from the backup offering [{}] due to: [{}].", vm, offering, e.getMessage(), e); } return result; } @@ -794,8 
+793,8 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { HostVO host = restoreInfo.first(); StoragePoolVO datastore = restoreInfo.second(); - logger.debug("Asking provider to restore volume " + backedUpVolumeUuid + " from backup " + backupId + - " (with external ID " + backup.getExternalId() + ") and attach it to VM: " + vm.getUuid()); + logger.debug("Asking provider to restore volume {} from backup {} (with external" + + " ID {}) and attach it to VM: {}", backedUpVolumeUuid, backup, backup.getExternalId(), vm); logger.debug(String.format("Trying to restore volume using host private IP address: [%s].", host.getPrivateIpAddress())); @@ -913,7 +912,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { } volumeInfo.setType(Volume.Type.DATADISK); - logger.debug("Attaching the restored volume to VM " + vm.getId()); + logger.debug("Attaching the restored volume to VM {}", vm); StoragePoolVO pool = primaryDataStoreDao.findByUuid(datastoreUuid); try { return guru.attachRestoredVolumeToVirtualMachine(zoneId, restoredVolumeLocation, volumeInfo, vm, pool.getId(), backup); @@ -1080,8 +1079,10 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { logger.debug("Next backup scheduled time for VM ID " + backupSchedule.getVmId() + " is " + nextScheduledTime); break; default: - logger.debug(String.format("Found async backup job [id: %s, vmId: %s] with status [%s] and cmd information: [cmd: %s, cmdInfo: %s].", asyncJob.getId(), backupSchedule.getVmId(), - asyncJob.getStatus(), asyncJob.getCmd(), asyncJob.getCmdInfo())); + logger.debug("Found async backup job [id: {}, uuid: {}, vmId: {}] with " + + "status [{}] and cmd information: [cmd: {}, cmdInfo: {}].", + asyncJob.getId(), asyncJob.getUuid(), backupSchedule.getVmId(), + asyncJob.getStatus(), asyncJob.getCmd(), asyncJob.getCmdInfo()); break; } } @@ -1114,15 +1115,15 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { 
final Account backupAccount = accountService.getAccount(vm.getAccountId()); if (backupAccount == null || backupAccount.getState() == Account.State.DISABLED) { - logger.debug(String.format("Skip backup for VM [uuid: %s, name: %s] since its account has been removed or disabled.", vm.getUuid(), vm.getInstanceName())); + logger.debug("Skip backup for VM ({}) since its account has been removed or disabled.", vm); continue; } if (logger.isDebugEnabled()) { final Date scheduledTimestamp = backupSchedule.getScheduledTimestamp(); displayTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, scheduledTimestamp); - logger.debug(String.format("Scheduling 1 backup for VM [ID: %s, name: %s, hostName: %s] for backup schedule id: [%s] at [%s].", - vm.getId(), vm.getInstanceName(), vm.getHostName(), backupSchedule.getId(), displayTime)); + logger.debug(String.format("Scheduling 1 backup for VM (%s) for backup schedule (%s) at [%s].", + vm, backupSchedule, displayTime)); } BackupScheduleVO tmpBackupScheduleVO = null; @@ -1219,19 +1220,19 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { } for (final DataCenter dataCenter : dataCenterDao.listAllZones()) { if (dataCenter == null || isDisabled(dataCenter.getId())) { - logger.debug(String.format("Backup Sync Task is not enabled in zone [%s]. Skipping this zone!", dataCenter == null ? "NULL Zone!" : dataCenter.getId())); + logger.debug("Backup Sync Task is not enabled in zone [{}]. Skipping this zone!", dataCenter == null ? "NULL Zone!" 
: dataCenter); continue; } final BackupProvider backupProvider = getBackupProvider(dataCenter.getId()); if (backupProvider == null) { - logger.warn("Backup provider not available or configured for zone ID " + dataCenter.getId()); + logger.warn("Backup provider not available or configured for zone {}", dataCenter); continue; } List vms = vmInstanceDao.listByZoneWithBackups(dataCenter.getId(), null); if (vms == null || vms.isEmpty()) { - logger.debug(String.format("Can't find any VM to sync backups in zone [id: %s].", dataCenter.getId())); + logger.debug("Can't find any VM to sync backups in zone {}", dataCenter); continue; } @@ -1256,7 +1257,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { try { final Backup.Metric metric = metrics.get(vm); if (metric != null) { - logger.debug(String.format("Trying to sync backups of VM [%s] using backup provider [%s].", vm.getUuid(), backupProvider.getName())); + logger.debug(String.format("Trying to sync backups of VM [%s] using backup provider [%s].", vm, backupProvider.getName())); // Sync out-of-band backups backupProvider.syncBackups(vm, metric); // Emit a usage event, update usage metric for the VM by the usage server @@ -1266,7 +1267,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { Backup.class.getSimpleName(), vm.getUuid()); } } catch (final Exception e) { - logger.error(String.format("Failed to sync backup usage metrics and out-of-band backups of VM [%s] due to: [%s].", vm.getUuid(), e.getMessage()), e); + logger.error("Failed to sync backup usage metrics and out-of-band backups of VM [{}] due to: [{}].", vm, e.getMessage(), e); } } @@ -1288,8 +1289,9 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { if (backupOfferingVO == null) { throw new InvalidParameterValueException(String.format("Unable to find Backup Offering with id: [%s].", id)); } - logger.debug(String.format("Trying to update Backup Offering %s to %s.", 
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(backupOfferingVO,"uuid", "name", - "description", "userDrivenBackupAllowed"), ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this,"name", "description", "allowUserDrivenBackups"))); + logger.debug("Trying to update Backup Offering {} to {}.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields(backupOfferingVO, "uuid", "name", "description", "userDrivenBackupAllowed"), + ReflectionToStringBuilderUtils.reflectOnlySelectedFields(updateBackupOfferingCmd, "name", "description", "allowUserDrivenBackups")); BackupOfferingVO offering = backupOfferingDao.createForUpdate(id); List fields = new ArrayList<>(); @@ -1310,7 +1312,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { } if (!backupOfferingDao.update(id, offering)) { - logger.warn(String.format("Couldn't update Backup offering [id: %s] with [%s].", id, String.join(", ", fields))); + logger.warn(String.format("Couldn't update Backup offering (%s) with [%s].", backupOfferingVO, String.join(", ", fields))); } BackupOfferingVO response = backupOfferingDao.findById(id); diff --git a/server/src/main/java/org/apache/cloudstack/ca/CAManagerImpl.java b/server/src/main/java/org/apache/cloudstack/ca/CAManagerImpl.java index d4ccac69d5f..22f8939e7eb 100644 --- a/server/src/main/java/org/apache/cloudstack/ca/CAManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/ca/CAManagerImpl.java @@ -195,8 +195,8 @@ public class CAManagerImpl extends ManagerBase implements CAManager { final Certificate certificate = issueCertificate(csr, Arrays.asList(host.getName(), host.getPrivateIpAddress()), Arrays.asList(host.getPrivateIpAddress(), host.getPublicIpAddress(), host.getStorageIpAddress()), CAManager.CertValidityPeriod.value(), caProvider); return deployCertificate(host, certificate, reconnect, null); } catch (final AgentUnavailableException | OperationTimedoutException e) { - logger.error("Host/agent is not available or 
operation timed out, failed to setup keystore and generate CSR for host/agent id=" + host.getId() + ", due to: ", e); - throw new CloudRuntimeException("Failed to generate keystore and get CSR from the host/agent id=" + host.getId()); + logger.error("Host/agent is not available or operation timed out, failed to setup keystore and generate CSR for host/agent {}, due to: ", host, e); + throw new CloudRuntimeException(String.format("Failed to generate keystore and get CSR from the host/agent %s", host)); } } @@ -234,11 +234,11 @@ public class CAManagerImpl extends ManagerBase implements CAManager { if (answer.getResult()) { getActiveCertificatesMap().put(host.getPrivateIpAddress(), certificate.getClientCertificate()); if (sshAccessDetails == null && reconnect != null && reconnect) { - logger.info(String.format("Successfully setup certificate on host, reconnecting with agent with id=%d, name=%s, address=%s", host.getId(), host.getName(), host.getPublicIpAddress())); + logger.info("Successfully setup certificate on host, reconnecting with agent [{}] with address={}", host, host.getPublicIpAddress()); try { agentManager.reconnect(host.getId()); } catch (AgentUnavailableException | CloudRuntimeException e) { - logger.debug("Error when reconnecting to host: " + host.getUuid(), e); + logger.debug("Error when reconnecting to host: {}", host, e); } } return true; diff --git a/server/src/main/java/org/apache/cloudstack/cluster/ClusterDrsServiceImpl.java b/server/src/main/java/org/apache/cloudstack/cluster/ClusterDrsServiceImpl.java index 3f312e0ba3e..a662d47d454 100644 --- a/server/src/main/java/org/apache/cloudstack/cluster/ClusterDrsServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/cluster/ClusterDrsServiceImpl.java @@ -210,7 +210,7 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ try { updateDrsPlanMigrations(plan); } catch (Exception e) { - logger.error(String.format("Unable to update DRS plan details [id=%d]", 
plan.getId()), e); + logger.error("Unable to update DRS plan details {}", plan, e); } } } @@ -228,7 +228,7 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ drsPlanDao.update(plan.getId(), plan); ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, EventVO.LEVEL_INFO, EventTypes.EVENT_CLUSTER_DRS, true, - String.format("DRS execution task completed for cluster [id=%s]", plan.getClusterId()), + String.format("DRS execution task completed for cluster %s", clusterDao.findById(plan.getClusterId())), plan.getClusterId(), ApiCommandResourceType.Cluster.toString(), plan.getEventId()); return; } @@ -237,8 +237,7 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ try { AsyncJobVO job = asyncJobManager.getAsyncJob(migration.getJobId()); if (job == null) { - logger.warn(String.format("Unable to find async job [id=%d] for DRS plan migration [id=%d]", - migration.getJobId(), migration.getId())); + logger.warn("Unable to find async job [id={}] for DRS plan migration {}", migration.getJobId(), migration); migration.setStatus(JobInfo.Status.FAILED); drsPlanMigrationDao.update(migration.getId(), migration); continue; @@ -248,7 +247,7 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ drsPlanMigrationDao.update(migration.getId(), migration); } } catch (Exception e) { - logger.error(String.format("Unable to update DRS plan migration [id=%d]", migration.getId()), e); + logger.error("Unable to update DRS plan migration {}", migration, e); } } } @@ -291,13 +290,9 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ ClusterDrsMaxMigrations.valueIn(cluster.getId())); savePlan(cluster.getId(), plan, eventId, ClusterDrsPlan.Type.AUTOMATED, ClusterDrsPlan.Status.READY); - logger.info(String.format("Generated DRS plan for cluster %s [id=%s]", cluster.getName(), - cluster.getUuid())); + logger.info("Generated DRS plan for cluster {}", 
cluster); } catch (Exception e) { - logger.error( - String.format("Unable to generate DRS plans for cluster %s [id=%s]", cluster.getName(), - cluster.getUuid()), - e); + logger.error("Unable to generate DRS plans for cluster {}", cluster, e); } finally { clusterLock.unlock(); } @@ -362,7 +357,7 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ serviceOfferingDao.findByIdIncludingRemoved(vm.getId(), vm.getServiceOfferingId())); } - while (iteration < maxIterations && algorithm.needsDrs(cluster.getId(), new ArrayList<>(hostCpuMap.values()), + while (iteration < maxIterations && algorithm.needsDrs(cluster, new ArrayList<>(hostCpuMap.values()), new ArrayList<>(hostMemoryMap.values()))) { Pair bestMigration = getBestMigration(cluster, algorithm, vmList, vmIdServiceOfferingMap, hostCpuMap, hostMemoryMap); @@ -372,8 +367,7 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ logger.debug("VM migrating to it's original host or no host found for migration"); break; } - logger.debug(String.format("Plan for VM %s to migrate from host %s to host %s", vm.getUuid(), - hostMap.get(vm.getHostId()).getUuid(), destHost.getUuid())); + logger.debug("Plan for VM {} to migrate from host {} to host {}", vm, hostMap.get(vm.getHostId()), destHost); ServiceOffering serviceOffering = vmIdServiceOfferingMap.get(vm.getId()); migrationPlan.add(new Ternary<>(vm, hostMap.get(vm.getHostId()), hostMap.get(destHost.getId()))); @@ -467,7 +461,7 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ if (!suitableDestinationHosts.contains(destHost) || cluster.getId() != destHost.getClusterId()) { continue; } - Ternary metrics = algorithm.getMetrics(cluster.getId(), vm, + Ternary metrics = algorithm.getMetrics(cluster, vm, vmIdServiceOfferingMap.get(vm.getId()), destHost, hostCpuCapacityMap, hostMemoryCapacityMap, requiresStorageMotion.get(destHost)); @@ -528,7 +522,7 @@ public class ClusterDrsServiceImpl 
extends ManagerBase implements ClusterDrsServ try { executeDrsPlan(plan); } catch (Exception e) { - logger.error(String.format("Unable to execute DRS plan [id=%d]", plan.getId()), e); + logger.error("Unable to execute DRS plan {}", plan, e); } } } @@ -564,16 +558,14 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ migration.getDestHostId())); } - logger.debug( - String.format("Executing DRS plan %s for vm %s to host %s", plan.getId(), vm.getInstanceName(), - host.getName())); + logger.debug("Executing DRS plan {} for vm {} to host {}", plan, vm, host); long jobId = createMigrateVMAsyncJob(vm, host, plan.getEventId()); AsyncJobVO job = asyncJobManager.getAsyncJob(jobId); migration.setJobId(jobId); migration.setStatus(job.getStatus()); drsPlanMigrationDao.update(migration.getId(), migration); } catch (Exception e) { - logger.warn(String.format("Unable to execute DRS plan %s due to %s", plan.getUuid(), e.getMessage())); + logger.warn("Unable to execute DRS plan {} due to {}", plan, e.getMessage()); migration.setStatus(JobInfo.Status.FAILED); drsPlanMigrationDao.update(migration.getId(), migration); } diff --git a/server/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManagerImpl.java b/server/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManagerImpl.java index 124ca05cc37..a69bbcd1ee9 100644 --- a/server/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManagerImpl.java @@ -222,8 +222,8 @@ public class ConsoleAccessManagerImpl extends ManagerBase implements ConsoleAcce String sessionUuid = UUID.randomUUID().toString(); return generateAccessEndpoint(vmId, sessionUuid, extraSecurityToken, clientAddress); } catch (Exception e) { - String errorMsg = String.format("Unexepected exception in ConsoleAccessManager - vmId: %s, clientAddress: %s", - vmId, clientAddress); + String errorMsg = 
String.format("Unexpected exception in ConsoleAccessManager - vmId: %s (%s), clientAddress: %s", + vmId, entityManager.findById(VirtualMachine.class, vmId), clientAddress); logger.error(errorMsg, e); return new ConsoleEndpoint(false, null, "Server Internal Error: " + e.getMessage()); } @@ -264,15 +264,17 @@ public class ConsoleAccessManagerImpl extends ManagerBase implements ConsoleAcce } catch (PermissionDeniedException ex) { if (accountManager.isNormalUser(account.getId())) { if (logger.isDebugEnabled()) { - logger.debug("VM access is denied for VM ID " + vm.getUuid() + ". VM owner account " + - vm.getAccountId() + " does not match the account id in session " + - account.getId() + " and caller is a normal user"); + logger.debug("VM access is denied for VM {}. VM owner " + + "account {} does not match the account id in session {} and " + + "caller is a normal user", vm, + accountManager.getAccount(vm.getAccountId()), account); } } else if ((accountManager.isDomainAdmin(account.getId()) || account.getType() == Account.Type.READ_ONLY_ADMIN) && logger.isDebugEnabled()) { - logger.debug("VM access is denied for VM ID " + vm.getUuid() + ". VM owner account " + - vm.getAccountId() + " does not match the account id in session " + - account.getId() + " and the domain-admin caller does not manage the target domain"); + logger.debug("VM access is denied for VM {}. 
VM owner account {}" + + " does not match the account id in session {} and the " + + "domain-admin caller does not manage the target domain", + vm, accountManager.getAccount(vm.getAccountId()), account); } return false; } @@ -300,23 +302,22 @@ public class ConsoleAccessManagerImpl extends ManagerBase implements ConsoleAcce throw new CloudRuntimeException(msg); } - String vmUuid = vm.getUuid(); if (unsupportedConsoleVMState.contains(vm.getState())) { - msg = "VM " + vmUuid + " must be running to connect console, sending blank response for console access request"; + msg = String.format("VM %s must be running to connect console, sending blank response for console access request", vm); logger.warn(msg); throw new CloudRuntimeException(msg); } Long hostId = vm.getState() != VirtualMachine.State.Migrating ? vm.getHostId() : vm.getLastHostId(); if (hostId == null) { - msg = "VM " + vmUuid + " lost host info, sending blank response for console access request"; + msg = String.format("VM %s lost host info, sending blank response for console access request", vm); logger.warn(msg); throw new CloudRuntimeException(msg); } HostVO host = managementServer.getHostBy(hostId); if (host == null) { - msg = "VM " + vmUuid + "'s host does not exist, sending blank response for console access request"; + msg = String.format("Host for VM %s does not exist, sending blank response for console access request", vm); logger.warn(msg); throw new CloudRuntimeException(msg); } @@ -569,7 +570,7 @@ public class ConsoleAccessManagerImpl extends ManagerBase implements ConsoleAcce private void setWebsocketUrl(VirtualMachine vm, ConsoleProxyClientParam param) { String ticket = acquireVncTicketForVmwareVm(vm); if (StringUtils.isBlank(ticket)) { - logger.error("Could not obtain VNC ticket for VM " + vm.getInstanceName()); + logger.error(String.format("Could not obtain VNC ticket for VM %s", vm)); return; } String wsUrl = composeWebsocketUrlForVmwareVm(ticket, param); @@ -590,7 +591,7 @@ public class 
ConsoleAccessManagerImpl extends ManagerBase implements ConsoleAcce */ private String acquireVncTicketForVmwareVm(VirtualMachine vm) { try { - logger.info("Acquiring VNC ticket for VM = " + vm.getHostName()); + logger.info("Acquiring VNC ticket for VM = {}", vm); GetVmVncTicketCommand cmd = new GetVmVncTicketCommand(vm.getInstanceName()); Answer answer = agentManager.send(vm.getHostId(), cmd); GetVmVncTicketAnswer ans = (GetVmVncTicketAnswer) answer; diff --git a/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java b/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java index 3e6c460a169..57321fa0910 100644 --- a/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java @@ -149,7 +149,7 @@ public class DiagnosticsServiceImpl extends ManagerBase implements PluggableServ final Map accessDetails = networkManager.getSystemVMAccessDetails(vmInstance); if (StringUtils.isEmpty(accessDetails.get(NetworkElementCommand.ROUTER_IP))) { - throw new CloudRuntimeException("Unable to set system vm ControlIP for system vm with ID: " + vmId); + throw new CloudRuntimeException("Unable to set system vm ControlIP for system vm: " + vmInstance); } command.setAccessDetail(accessDetails); @@ -227,7 +227,7 @@ public class DiagnosticsServiceImpl extends ManagerBase implements PluggableServ final long zoneId = vmInstance.getDataCenterId(); VMInstanceVO ssvm = getSecondaryStorageVmInZone(zoneId); if (ssvm == null) { - throw new CloudRuntimeException("No SSVM found in zone with ID: " + zoneId); + throw new CloudRuntimeException("No SSVM found in zone: " + dataCenterDao.findById(zoneId)); } // Secondary Storage install path = "diagnostics_data/diagnostics_files_xxxx.tar @@ -265,7 +265,7 @@ public class DiagnosticsServiceImpl extends ManagerBase implements PluggableServ private void 
configureNetworkElementCommand(NetworkElementCommand cmd, VMInstanceVO vmInstance) { Map accessDetails = networkManager.getSystemVMAccessDetails(vmInstance); if (StringUtils.isBlank(accessDetails.get(NetworkElementCommand.ROUTER_IP))) { - throw new CloudRuntimeException("Unable to set system vm ControlIP for system vm with ID: " + vmInstance.getId()); + throw new CloudRuntimeException(String.format("Unable to set system vm ControlIP for system vm: %s", vmInstance)); } cmd.setAccessDetail(accessDetails); } @@ -282,10 +282,10 @@ public class DiagnosticsServiceImpl extends ManagerBase implements PluggableServ configureNetworkElementCommand(cmd, vmInstance); final Answer fileCleanupAnswer = agentManager.easySend(vmInstance.getHostId(), cmd); if (fileCleanupAnswer == null) { - logger.error(String.format("Failed to cleanup diagnostics zip file on vm: %s", vmInstance.getUuid())); + logger.error("Failed to cleanup diagnostics zip file on vm: {}", vmInstance); } else { if (!fileCleanupAnswer.getResult()) { - logger.error(String.format("Zip file cleanup for vm %s has failed with: %s", vmInstance.getUuid(), fileCleanupAnswer.getDetails())); + logger.error("Zip file cleanup for vm {} has failed with: {}", vmInstance, fileCleanupAnswer.getDetails()); } } @@ -328,7 +328,7 @@ public class DiagnosticsServiceImpl extends ManagerBase implements PluggableServ boolean success = false; String mountPoint = mountManager.getMountPoint(store.getUri(), imageStoreDetailsUtil.getNfsVersion(store.getId())); if (StringUtils.isBlank(mountPoint)) { - logger.error("Failed to generate mount point for copying to secondary storage for " + store.getName()); + logger.error("Failed to generate mount point for copying to secondary storage for {}", store); return new Pair<>(false, "Failed to mount secondary storage:" + store.getName()); } @@ -371,7 +371,7 @@ public class DiagnosticsServiceImpl extends ManagerBase implements PluggableServ private DataStore getImageStore(Long zoneId) { List stores = 
storeMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(zoneId)); if (CollectionUtils.isEmpty(stores)) { - throw new CloudRuntimeException("No Secondary storage found in Zone with Id: " + zoneId); + throw new CloudRuntimeException(String.format("No Secondary storage found in Zone: %s", dataCenterDao.findById(zoneId))); } DataStore imageStore = null; for (DataStore store : stores) { @@ -382,7 +382,7 @@ public class DiagnosticsServiceImpl extends ManagerBase implements PluggableServ } } if (imageStore == null) { - throw new CloudRuntimeException("No suitable secondary storage found to retrieve diagnostics in Zone: " + zoneId); + throw new CloudRuntimeException(String.format("No suitable secondary storage found to retrieve diagnostics in Zone: %s", dataCenterDao.findById(zoneId))); } return imageStore; } @@ -418,7 +418,7 @@ public class DiagnosticsServiceImpl extends ManagerBase implements PluggableServ Map accessDetails = networkManager.getSystemVMAccessDetails(vmInstance); String controlIP = accessDetails.get(NetworkElementCommand.ROUTER_IP); if (StringUtils.isBlank(controlIP)) { - throw new CloudRuntimeException("Unable to find system vm ssh/control IP for vm with ID: " + vmInstance.getId()); + throw new CloudRuntimeException(String.format("Unable to find system vm ssh/control IP for vm: %s", vmInstance)); } return controlIP; } diff --git a/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataObject.java b/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataObject.java index de66ad4d5e6..34260f60625 100644 --- a/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataObject.java +++ b/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataObject.java @@ -24,6 +24,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreState import com.cloud.agent.api.Answer; import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataTO; +import 
org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class DiagnosticsDataObject implements DataObject { private DataTO dataTO; @@ -34,6 +35,13 @@ public class DiagnosticsDataObject implements DataObject { this.dataStore = dataStore; } + @Override + public String toString() { + return String.format("DiagnosticsDataObject %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "dataTO", "dataStore")); + } + @Override public long getId() { return 0; diff --git a/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java b/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java index e15476819ce..005e24a8bce 100644 --- a/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java @@ -288,7 +288,7 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown VMTemplateStoragePoolVO sPoolRef = vmTemplatePoolDao.findByPoolTemplate(poolId, templateId, null); if (sPoolRef == null) { if (logger.isDebugEnabled()) { - logger.debug("Not found (templateId:" + templateId + " poolId: " + poolId + ") in template_spool_ref, persisting it"); + logger.debug("Not found (template: {} pool: {}) in template_spool_ref, persisting it", template, pool); } DirectDownloadAnswer ans = (DirectDownloadAnswer) answer; sPoolRef = new VMTemplateStoragePoolVO(poolId, templateId, null); @@ -365,14 +365,14 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown retry --; } if (!downloaded) { - logUsageEvent(template, poolId); + logUsageEvent(template, storagePoolVO); if (!answerDetails.isEmpty()){ Account caller = CallContext.current().getCallingAccount(); if (caller != null && caller.getType() == Account.Type.ADMIN){ errorDetails = String.format(" Details: %s", answerDetails); } } - throw new 
CloudRuntimeException(String.format("Template %d could not be downloaded on pool %d, failing after trying on several hosts%s", template.getId(), poolId, errorDetails)); + throw new CloudRuntimeException(String.format("Template %s could not be downloaded on pool %s, failing after trying on several hosts%s", template, storagePoolVO, errorDetails)); } return answer; } @@ -380,12 +380,12 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown /** * Log and persist event for direct download failure */ - private void logUsageEvent(VMTemplateVO template, long poolId) { + private void logUsageEvent(VMTemplateVO template, StoragePoolVO pool) { String event = EventTypes.EVENT_TEMPLATE_DIRECT_DOWNLOAD_FAILURE; if (template.getFormat() == ImageFormat.ISO) { event = EventTypes.EVENT_ISO_DIRECT_DOWNLOAD_FAILURE; } - String description = "Direct Download for template Id: " + template.getId() + " on pool Id: " + poolId + " failed"; + String description = String.format("Direct Download for template: %s on pool: %s failed", template, pool); logger.error(description); ActionEventUtils.onCompletedActionEvent(CallContext.current().getCallingUserId(), template.getAccountId(), EventVO.LEVEL_INFO, event, description, template.getId(), ApiCommandResourceType.Template.toString(), 0); } @@ -507,12 +507,12 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown hosts = Collections.singletonList(host); certificateVO = directDownloadCertificateDao.findByAlias(alias, hypervisorType, zoneId); if (certificateVO == null) { - logger.info("Certificate must be uploaded on zone " + zoneId); + logger.info("Certificate must be uploaded on zone {}", () -> dataCenterDao.findById(zoneId)); return new Pair<>(certificateVO, new ArrayList<>()); } } - logger.info("Attempting to upload certificate: " + alias + " to " + hosts.size() + " hosts on zone " + zoneId); + logger.info("Attempting to upload certificate: {} to {} hosts on zone {}", () -> alias, 
hosts::size, () -> dataCenterDao.findById(zoneId)); int success = 0; int failed = 0; List results = new ArrayList<>(); @@ -522,9 +522,9 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown continue; } HostCertificateStatus hostStatus; - Pair result = provisionCertificate(certificateVO.getId(), host.getId()); + Pair result = provisionCertificate(certificateVO, host); if (!result.first()) { - String msg = "Could not upload certificate " + alias + " on host: " + host.getName() + " (" + host.getUuid() + "): " + result.second(); + String msg = String.format("Could not upload certificate %s on host: %s: %s", alias, host, result.second()); logger.error(msg); failed++; hostStatus = new HostCertificateStatus(CertificateStatus.FAILED, host, result.second()); @@ -539,17 +539,17 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown return new Pair<>(certificateVO, results); } - private Pair setupCertificateOnHost(DirectDownloadCertificate certificate, long hostId) { + private Pair setupCertificateOnHost(DirectDownloadCertificate certificate, Host host) { String certificateStr = certificate.getCertificate(); String alias = certificate.getAlias(); long certificateId = certificate.getId(); - logger.debug("Uploading certificate: " + alias + " to host " + hostId); + logger.debug("Uploading certificate: {} to host {}", alias, host); SetupDirectDownloadCertificateCommand cmd = new SetupDirectDownloadCertificateCommand(certificateStr, alias); - Answer answer = agentManager.easySend(hostId, cmd); + Answer answer = agentManager.easySend(host.getId(), cmd); Pair result; if (answer == null || !answer.getResult()) { - String msg = "Certificate " + alias + " could not be added to host " + hostId; + String msg = String.format("Certificate %s could not be added to host %s", alias, host); if (answer != null) { msg += " due to: " + answer.getDetails(); } @@ -559,13 +559,13 @@ public class DirectDownloadManagerImpl extends ManagerBase 
implements DirectDown result = new Pair<>(true, "OK"); } - logger.info("Certificate " + alias + " successfully uploaded to host: " + hostId); - DirectDownloadCertificateHostMapVO map = directDownloadCertificateHostMapDao.findByCertificateAndHost(certificateId, hostId); + logger.info("Certificate {} successfully uploaded to host: {}", alias, host); + DirectDownloadCertificateHostMapVO map = directDownloadCertificateHostMapDao.findByCertificateAndHost(certificateId, host.getId()); if (map != null) { map.setRevoked(false); directDownloadCertificateHostMapDao.update(map.getId(), map); } else { - DirectDownloadCertificateHostMapVO mapVO = new DirectDownloadCertificateHostMapVO(certificateId, hostId); + DirectDownloadCertificateHostMapVO mapVO = new DirectDownloadCertificateHostMapVO(certificateId, host.getId()); directDownloadCertificateHostMapDao.persist(mapVO); } return result; @@ -574,19 +574,22 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown * Upload and import certificate to hostId on keystore */ public Pair provisionCertificate(long certificateId, long hostId) { - DirectDownloadCertificateVO certificateVO = directDownloadCertificateDao.findById(certificateId); - if (certificateVO == null) { - throw new CloudRuntimeException("Could not find certificate with id " + certificateId + " to upload to host: " + hostId); - } HostVO host = hostDao.findById(hostId); if (host == null) { throw new CloudRuntimeException("Cannot find a host with ID " + hostId); } + DirectDownloadCertificateVO certificateVO = directDownloadCertificateDao.findById(certificateId); + if (certificateVO == null) { + throw new CloudRuntimeException(String.format("Could not find certificate with id %d to upload to host: %s", certificateId, host)); + } + return provisionCertificate(certificateVO, host); + } + + public Pair provisionCertificate(DirectDownloadCertificate certificate, Host host) { if (host.getHypervisorType() != HypervisorType.KVM) { throw new 
CloudRuntimeException("Cannot provision certificate to host " + host.getName() + " since it is not KVM"); } - - return setupCertificateOnHost(certificateVO, hostId); + return setupCertificateOnHost(certificate, host); } @Override @@ -594,32 +597,33 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown List zoneCertificates = directDownloadCertificateDao.listByZone(zoneId); if (CollectionUtils.isEmpty(zoneCertificates)) { if (logger.isTraceEnabled()) { - logger.trace("No certificates to sync on host: " + hostId); + logger.trace("No certificates to sync on host: {}", () -> hostDao.findById(hostId)); } return true; } boolean syncCertificatesResult = true; int certificatesSyncCount = 0; - logger.debug("Syncing certificates on host: " + hostId); + HostVO host = hostDao.findById(hostId); + logger.debug("Syncing certificates on host: {}", host); for (DirectDownloadCertificateVO certificateVO : zoneCertificates) { DirectDownloadCertificateHostMapVO mapping = directDownloadCertificateHostMapDao.findByCertificateAndHost(certificateVO.getId(), hostId); if (mapping == null) { - logger.debug("Syncing certificate " + certificateVO.getId() + " (" + certificateVO.getAlias() + ") on host: " + hostId + ", uploading it"); + logger.debug("Syncing certificate {} on host: {}, uploading it", certificateVO, host); Pair result = provisionCertificate(certificateVO.getId(), hostId); if (!result.first()) { - String msg = "Could not sync certificate " + certificateVO.getId() + " (" + certificateVO.getAlias() + ") on host: " + hostId + ", upload failed: " + result.second(); + String msg = String.format("Could not sync certificate %s on host: %s, upload failed: %s", certificateVO, host, result.second()); logger.error(msg); syncCertificatesResult = false; } else { certificatesSyncCount++; } } else { - logger.debug("Certificate " + certificateVO.getId() + " (" + certificateVO.getAlias() + ") already synced on host: " + hostId); + logger.debug("Certificate {} already 
synced on host: {}", certificateVO, host); } } - logger.debug("Synced " + certificatesSyncCount + " out of " + zoneCertificates.size() + " certificates on host: " + hostId); + logger.debug("Synced {} out of {} certificates on host: {}", certificatesSyncCount, zoneCertificates.size(), host); return syncCertificatesResult; } @@ -630,11 +634,11 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown } else { DirectDownloadCertificateHostMapVO hostMap = directDownloadCertificateHostMapDao.findByCertificateAndHost(certificate.getId(), hostId); if (hostMap == null) { - String msg = "Certificate " + certificate.getAlias() + " cannot be revoked from host " + hostId + " as it is not available on the host"; + String msg = String.format("Certificate %s cannot be revoked from host %s as it is not available on the host", certificate, hostDao.findById(hostId)); logger.error(msg); throw new CloudRuntimeException(msg); } else if (hostMap.isRevoked()) { - logger.debug("Certificate " + certificate.getAlias() + " was already revoked from host " + hostId + " skipping it"); + logger.debug("Certificate {} was already revoked from host {} skipping it", certificate, hostDao.findById(hostId)); return new LinkedList<>(); } maps = Collections.singletonList(hostMap); @@ -684,21 +688,21 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown if (host == null || host.getDataCenterId() != zoneId || host.getHypervisorType() != HypervisorType.KVM) { if (host != null) { String reason = host.getDataCenterId() != zoneId ? 
"Host is not in the zone " + zoneId : "Host hypervisor is not KVM"; - logger.debug("Skipping host " + host.getName() + ": " + reason); + logger.debug("Skipping host {}: {}", host, reason); hostStatus = new HostCertificateStatus(CertificateStatus.SKIPPED, host, reason); hostsList.add(hostStatus); } skipped++; continue; } - Pair result = revokeCertificateAliasFromHost(certificateAlias, mappingHostId); + Pair result = revokeCertificateAliasFromHost(certificateAlias, host); if (!result.first()) { - String msg = "Could not revoke certificate from host: " + mappingHostId + ": " + result.second(); + String msg = String.format("Could not revoke certificate from host: %s: %s", host, result.second()); logger.error(msg); hostStatus = new HostCertificateStatus(CertificateStatus.FAILED, host, result.second()); failed++; } else { - logger.info("Certificate " + certificateAlias + " revoked from host " + mappingHostId); + logger.info("Certificate {} revoked from host {}", certificate, host); map.setRevoked(true); hostStatus = new HostCertificateStatus(CertificateStatus.REVOKED, host, null); success++; @@ -706,8 +710,7 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown } hostsList.add(hostStatus); } - logger.info(String.format("Certificate alias %s revoked from: %d hosts, %d failed, %d skipped", - certificateAlias, success, failed, skipped)); + logger.info("Certificate alias {} revoked from: {} hosts, {} failed, {} skipped", certificate, success, failed, skipped); return hostsList; } @@ -735,13 +738,13 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown return new LinkedList<>(directDownloadCertificateHostMapDao.listByCertificateId(certificateId)); } - protected Pair revokeCertificateAliasFromHost(String alias, Long hostId) { + protected Pair revokeCertificateAliasFromHost(String alias, Host host) { RevokeDirectDownloadCertificateCommand cmd = new RevokeDirectDownloadCertificateCommand(alias); try { - Answer answer = 
agentManager.send(hostId, cmd); + Answer answer = agentManager.send(host.getId(), cmd); return new Pair<>(answer != null && answer.getResult(), answer != null ? answer.getDetails() : ""); } catch (AgentUnavailableException | OperationTimedoutException e) { - logger.error("Error revoking certificate " + alias + " from host " + hostId, e); + logger.error("Error revoking certificate {} from host {}", alias, host, e); return new Pair<>(false, e.getMessage()); } } @@ -822,15 +825,11 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown for (HostVO hostVO : hostsToUpload) { DirectDownloadCertificateHostMapVO mapping = directDownloadCertificateHostMapDao.findByCertificateAndHost(certificateVO.getId(), hostVO.getId()); if (mapping == null) { - logger.debug("Certificate " + certificateVO.getId() + - " (" + certificateVO.getAlias() + ") was not uploaded to host: " + hostVO.getId() + - " uploading it"); + logger.debug("Certificate {} was not uploaded to host: {} uploading it", certificateVO, hostVO); Pair result = directDownloadManager.provisionCertificate(certificateVO.getId(), hostVO.getId()); - logger.debug("Certificate " + certificateVO.getAlias() + " " + - (result.first() ? "uploaded" : "could not be uploaded") + - " to host " + hostVO.getId()); + logger.debug("Certificate {} {} to host {}", certificateVO, result.first() ? 
"uploaded" : "could not be uploaded", hostVO); if (!result.first()) { - logger.error("Certificate " + certificateVO.getAlias() + " failed: " + result.second()); + logger.error("Certificate {} failed: {}", certificateVO, result.second()); } } } diff --git a/server/src/main/java/org/apache/cloudstack/ha/HAManagerImpl.java b/server/src/main/java/org/apache/cloudstack/ha/HAManagerImpl.java index 2ab252430d5..a016be5c6e3 100644 --- a/server/src/main/java/org/apache/cloudstack/ha/HAManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/ha/HAManagerImpl.java @@ -153,8 +153,8 @@ public final class HAManagerImpl extends ManagerBase implements HAManager, Clust final HAConfig.HAState nextState = HAConfig.HAState.getStateMachine().getNextState(currentHAState, event); boolean result = HAConfig.HAState.getStateMachine().transitTo(haConfig, event, null, haConfigDao); if (result) { - final String message = String.format("Transitioned host HA state from:%s to:%s due to event:%s for the host id:%d", - currentHAState, nextState, event, haConfig.getResourceId()); + final String message = String.format("Transitioned host HA state from: %s to: %s due to event:%s for the host %s with id: %d", + currentHAState, nextState, event, hostDao.findByIdIncludingRemoved(haConfig.getResourceId()), haConfig.getResourceId()); logger.debug(message); if (nextState == HAConfig.HAState.Recovering || nextState == HAConfig.HAState.Fencing || nextState == HAConfig.HAState.Fenced) { @@ -164,7 +164,8 @@ public final class HAManagerImpl extends ManagerBase implements HAManager, Clust } return result; } catch (NoTransitionException e) { - logger.warn(String.format("Unable to find next HA state for current HA state=[%s] for event=[%s] for host=[%s].", currentHAState, event, haConfig.getResourceId()), e); + logger.warn("Unable to find next HA state for current HA state=[{}] for event=[{}] for host {} with id {}.", + currentHAState, event, hostDao.findByIdIncludingRemoved(haConfig.getResourceId()), 
haConfig.getResourceId(), e); } return false; } @@ -294,7 +295,7 @@ public final class HAManagerImpl extends ManagerBase implements HAManager, Clust } if (!host.getHypervisorType().toString().equals(haProvider.resourceSubType().toString())) { - throw new ServerApiException(ApiErrorCode.PARAM_ERROR, String.format("Incompatible haprovider provided [%s] for the resource [%s] of hypervisor type: [%s].", haProvider.resourceSubType().toString(), host.getId(),host.getHypervisorType())); + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, String.format("Incompatible haprovider provided [%s] for the resource [%s] of hypervisor type: [%s].", haProvider.resourceSubType().toString(), host.getUuid(), host.getHypervisorType())); } } } @@ -307,10 +308,10 @@ public final class HAManagerImpl extends ManagerBase implements HAManager, Clust final HAConfig haConfig = haConfigDao.findHAResource(host.getId(), HAResource.ResourceType.Host); if (haConfig != null) { if (haConfig.getState() == HAConfig.HAState.Fenced) { - logger.debug(String.format("HA: Host [%s] is fenced.", host.getId())); + logger.debug("HA: Host [{}] is fenced.", host); return false; } - logger.debug(String.format("HA: Host [%s] is alive.", host.getId())); + logger.debug("HA: Host [{}] is alive.", host); return true; } throw new Investigator.UnknownVM(); @@ -320,10 +321,10 @@ public final class HAManagerImpl extends ManagerBase implements HAManager, Clust final HAConfig haConfig = haConfigDao.findHAResource(host.getId(), HAResource.ResourceType.Host); if (haConfig != null) { if (haConfig.getState() == HAConfig.HAState.Fenced) { - logger.debug(String.format("HA: Agent [%s] is available/suspect/checking Up.", host.getId())); + logger.debug("HA: Agent [{}] is available/suspect/checking Up.", host); return Status.Down; } else if (haConfig.getState() == HAConfig.HAState.Degraded || haConfig.getState() == HAConfig.HAState.Recovering || haConfig.getState() == HAConfig.HAState.Fencing) { - logger.debug(String.format("HA: 
Agent [%s] is disconnected. State: %s, %s.", host.getId(), haConfig.getState(), haConfig.getState().getDescription())); + logger.debug("HA: Agent [{}] is disconnected. State: {}, {}.", host, haConfig.getState(), haConfig.getState().getDescription()); return Status.Disconnected; } return Status.Up; diff --git a/server/src/main/java/org/apache/cloudstack/ha/provider/host/HAAbstractHostProvider.java b/server/src/main/java/org/apache/cloudstack/ha/provider/host/HAAbstractHostProvider.java index af76d2d4ae7..5907c1864ad 100644 --- a/server/src/main/java/org/apache/cloudstack/ha/provider/host/HAAbstractHostProvider.java +++ b/server/src/main/java/org/apache/cloudstack/ha/provider/host/HAAbstractHostProvider.java @@ -69,12 +69,12 @@ public abstract class HAAbstractHostProvider extends AdapterBase implements HAPr } @Override - public void fenceSubResources(final Host r) { - if (r.getState() != Status.Down) { + public void fenceSubResources(final Host host) { + if (host.getState() != Status.Down) { try { - logger.debug("Trying to disconnect the host without investigation and scheduling HA for the VMs on host id=" + r.getId()); - agentManager.disconnectWithoutInvestigation(r.getId(), Event.HostDown); - oldHighAvailabilityManager.scheduleRestartForVmsOnHost((HostVO)r, true); + logger.debug("Trying to disconnect the host without investigation and scheduling HA for the VMs on host {}", host); + agentManager.disconnectWithoutInvestigation(host.getId(), Event.HostDown); + oldHighAvailabilityManager.scheduleRestartForVmsOnHost((HostVO)host, true); } catch (Exception e) { logger.error("Failed to disconnect host and schedule HA restart of VMs after fencing the host: ", e); } diff --git a/server/src/main/java/org/apache/cloudstack/ha/task/BaseHATask.java b/server/src/main/java/org/apache/cloudstack/ha/task/BaseHATask.java index 9cc65e796a8..6dc7b9281ba 100644 --- a/server/src/main/java/org/apache/cloudstack/ha/task/BaseHATask.java +++ 
b/server/src/main/java/org/apache/cloudstack/ha/task/BaseHATask.java @@ -100,7 +100,7 @@ public abstract class BaseHATask implements Callable { logger.warn("Exception occurred while running " + getTaskType() + " on a resource: " + e.getMessage(), e.getCause()); throwable = e.getCause(); } catch (TimeoutException e) { - logger.trace(getTaskType() + " operation timed out for resource id:" + resource.getId()); + logger.trace("{} operation timed out for resource: {}", getTaskType(), resource); } processResult(result, throwable); return result; diff --git a/server/src/main/java/org/apache/cloudstack/network/RoutedIpv4ManagerImpl.java b/server/src/main/java/org/apache/cloudstack/network/RoutedIpv4ManagerImpl.java index ab05895b8d2..50ec8a827b4 100644 --- a/server/src/main/java/org/apache/cloudstack/network/RoutedIpv4ManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/network/RoutedIpv4ManagerImpl.java @@ -962,10 +962,10 @@ public class RoutedIpv4ManagerImpl extends ComponentLifecycleBase implements Rou return false; } if (!FirewallRule.Purpose.Firewall.equals(rule.getPurpose())) { - logger.error(String.format("Cannot apply routing firewall rule with ID: %d as purpose %s is not %s", id, rule.getPurpose(), FirewallRule.Purpose.Firewall)); + logger.error("Cannot apply routing firewall rule: {} as purpose {} is not {}", rule, rule.getPurpose(), FirewallRule.Purpose.Firewall); return false; } - logger.debug(String.format("Applying routing firewall rules for rule with ID: %s", rule.getUuid())); + logger.debug("Applying routing firewall rules for rule: {}", rule); List rules = new ArrayList<>(); rules.addAll(firewallDao.listByNetworkPurposeTrafficType(rule.getNetworkId(), rule.getPurpose(), FirewallRule.TrafficType.Egress)); rules.addAll(firewallDao.listByNetworkPurposeTrafficType(rule.getNetworkId(), rule.getPurpose(), FirewallRule.TrafficType.Ingress)); diff --git a/server/src/main/java/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java 
b/server/src/main/java/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java index f05e216f1eb..bbfa83dcf43 100644 --- a/server/src/main/java/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java @@ -180,8 +180,7 @@ public class ApplicationLoadBalancerManagerImpl extends ManagerBase implements A if (!_firewallDao.setStateToAdd(newRule)) { throw new CloudRuntimeException("Unable to update the state to add for " + newRule); } - logger.debug("Load balancer " + newRule.getId() + " for Ip address " + newRule.getSourceIp().addr() + ", source port " + - newRule.getSourcePortStart().intValue() + ", instance port " + newRule.getDefaultPortStart() + " is added successfully."); + logger.debug("Load balancer rule {} for Ip address {}, source port {}, instance port {} is added successfully.", newRule, newRule.getSourceIp().addr(), newRule.getSourcePortStart(), newRule.getDefaultPortStart()); CallContext.current().setEventDetails("Load balancer Id: " + newRule.getId()); Network ntwk = _networkModel.getNetwork(newRule.getNetworkId()); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_LOAD_BALANCER_CREATE, newRule.getAccountId(), ntwk.getDataCenterId(), newRule.getId(), null, @@ -524,7 +523,7 @@ public class ApplicationLoadBalancerManagerImpl extends ManagerBase implements A .intValue())) { throw new NetworkRuleConflictException("The range specified, " + newLbRule.getSourcePortStart().intValue() + "-" + newLbRule.getSourcePortEnd().intValue() + - ", conflicts with rule " + lbRule.getId() + " which has " + lbRule.getSourcePortStart().intValue() + "-" + lbRule.getSourcePortEnd().intValue()); + ", conflicts with rule " + lbRule + " which has " + lbRule.getSourcePortStart().intValue() + "-" + lbRule.getSourcePortEnd().intValue()); } } diff --git a/server/src/main/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinition.java 
b/server/src/main/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinition.java index 817cfe07e58..debe9eee6da 100644 --- a/server/src/main/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinition.java +++ b/server/src/main/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinition.java @@ -26,6 +26,7 @@ import com.cloud.dc.Vlan; import com.cloud.network.dao.NetworkDetailVO; import com.cloud.network.dao.NetworkDetailsDao; import com.cloud.network.dao.NsxProviderDao; +import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.element.NsxProviderVO; import com.cloud.network.router.VirtualRouter; import com.cloud.storage.DiskOfferingVO; @@ -94,6 +95,7 @@ public class RouterDeploymentDefinition { protected DomainRouterDao routerDao; protected NsxProviderDao nsxProviderDao; protected PhysicalNetworkServiceProviderDao physicalProviderDao; + protected PhysicalNetworkDao pNtwkDao; protected NetworkModel networkModel; protected VirtualRouterProviderDao vrProviderDao; protected NetworkOfferingDao networkOfferingDao; @@ -257,7 +259,7 @@ public class RouterDeploymentDefinition { protected void lock() { final Network lock = networkDao.acquireInLockTable(guestNetwork.getId(), NetworkOrchestrationService.NetworkLockTimeout.value()); if (lock == null) { - throw new ConcurrentOperationException("Unable to lock network " + guestNetwork.getId()); + throw new ConcurrentOperationException(String.format("Unable to lock network %s", guestNetwork)); } tableLockId = lock.getId(); } @@ -266,7 +268,7 @@ public class RouterDeploymentDefinition { if (tableLockId != null) { networkDao.releaseFromLockTable(tableLockId); if (logger.isDebugEnabled()) { - logger.debug("Lock is released for network id " + tableLockId + " as a part of router startup in " + dest); + logger.debug(String.format("Lock is released for network [id: %d] (%s) as a part of router startup in %s", tableLockId, guestNetwork, dest)); } } } @@ -309,7 
+311,7 @@ public class RouterDeploymentDefinition { // If List size is one, we already have a starting or running VR, skip deployment if (virtualRouters.size() == 1) { - logger.debug("Skipping VR deployment: Found a running or starting VR in Pod " + pod.getName() + " id=" + podId); + logger.debug(String.format("Skipping VR deployment: Found a running or starting VR in Pod %s", pod)); continue; } // Add new DeployDestination for this pod @@ -429,7 +431,7 @@ public class RouterDeploymentDefinition { DiskOfferingVO diskOffering = diskOfferingDao.findById(serviceOffering.getDiskOfferingId()); boolean isLocalStorage = ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(dest.getDataCenter().getId()); if (isLocalStorage == diskOffering.isUseLocalStorage()) { - logger.debug(String.format("Service offering %s (uuid: %s) will be used on virtual router", serviceOffering.getName(), serviceOffering.getUuid())); + logger.debug(String.format("Service offering %s will be used on virtual router", serviceOffering)); serviceOfferingId = serviceOffering.getId(); } } @@ -452,7 +454,7 @@ public class RouterDeploymentDefinition { final PhysicalNetworkServiceProvider provider = physicalProviderDao.findByServiceProvider(physicalNetworkId, type.toString()); if (provider == null) { - throw new CloudRuntimeException(String.format("Cannot find service provider %s in physical network %s", type.toString(), physicalNetworkId)); + throw new CloudRuntimeException(String.format("Cannot find service provider %s in physical network %s", type.toString(), pNtwkDao.findById(physicalNetworkId))); } vrProvider = vrProviderDao.findByNspIdAndType(provider.getId(), type); diff --git a/server/src/main/java/org/apache/cloudstack/network/router/deployment/VpcRouterDeploymentDefinition.java b/server/src/main/java/org/apache/cloudstack/network/router/deployment/VpcRouterDeploymentDefinition.java index 405575c65b1..b9ff50f369d 100644 --- 
a/server/src/main/java/org/apache/cloudstack/network/router/deployment/VpcRouterDeploymentDefinition.java +++ b/server/src/main/java/org/apache/cloudstack/network/router/deployment/VpcRouterDeploymentDefinition.java @@ -36,7 +36,6 @@ import com.cloud.network.Network; import com.cloud.network.PhysicalNetwork; import com.cloud.network.PhysicalNetworkServiceProvider; import com.cloud.network.VirtualRouterProvider.Type; -import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.vpc.Vpc; import com.cloud.network.vpc.VpcManager; import com.cloud.network.vpc.dao.VpcDao; @@ -50,7 +49,6 @@ public class VpcRouterDeploymentDefinition extends RouterDeploymentDefinition { protected VpcDao vpcDao; protected VpcOfferingDao vpcOffDao; - protected PhysicalNetworkDao pNtwkDao; protected VpcManager vpcMgr; protected VlanDao vlanDao; @@ -78,7 +76,7 @@ public class VpcRouterDeploymentDefinition extends RouterDeploymentDefinition { protected void lock() { final Vpc vpcLock = vpcDao.acquireInLockTable(vpc.getId()); if (vpcLock == null) { - throw new ConcurrentOperationException("Unable to lock vpc " + vpc.getId()); + throw new ConcurrentOperationException(String.format("Unable to lock vpc %s", vpc)); } tableLockId = vpcLock.getId(); } @@ -88,7 +86,7 @@ public class VpcRouterDeploymentDefinition extends RouterDeploymentDefinition { if (tableLockId != null) { vpcDao.releaseFromLockTable(tableLockId); if (logger.isDebugEnabled()) { - logger.debug("Lock is released for vpc id " + tableLockId + " as a part of router startup in " + dest); + logger.debug(String.format("Lock is released for vpc [id: %d] (%s) as a part of router startup in %s", tableLockId, vpc, dest)); } } } @@ -166,7 +164,7 @@ public class VpcRouterDeploymentDefinition extends RouterDeploymentDefinition { for (final PhysicalNetwork pNtwk : pNtwks) { final PhysicalNetworkServiceProvider provider = physicalProviderDao.findByServiceProvider(pNtwk.getId(), Type.VPCVirtualRouter.toString()); if (provider == null) { - 
throw new CloudRuntimeException("Cannot find service provider " + Type.VPCVirtualRouter.toString() + " in physical network " + pNtwk.getId()); + throw new CloudRuntimeException(String.format("Cannot find service provider %s in physical network %s", Type.VPCVirtualRouter.toString(), pNtwk)); } vrProvider = vrProviderDao.findByNspIdAndType(provider.getId(), Type.VPCVirtualRouter); if (vrProvider != null) { diff --git a/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkTopology.java b/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkTopology.java index 5c1fc5e9ac6..936d9cfb3d6 100644 --- a/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkTopology.java +++ b/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkTopology.java @@ -107,7 +107,7 @@ public class AdvancedNetworkTopology extends BasicNetworkTopology { result = result && routesRules.accept(_advancedVisitor, router); } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) { - logger.debug("Router " + router.getInstanceName() + " is in " + router.getState() + ", so not sending StaticRoute command to the backend"); + logger.debug("Router {} is in {}, so not sending StaticRoute command to the backend", router, router.getState()); } else { logger.warn("Unable to apply StaticRoute, virtual router is not in the right state " + router.getState()); @@ -193,7 +193,7 @@ public class AdvancedNetworkTopology extends BasicNetworkTopology { throws ResourceUnavailableException { if (ipAddresses == null || ipAddresses.isEmpty()) { - logger.debug("No ip association rules to be applied for network " + network.getId()); + logger.debug("No ip association rules to be applied for network {}", network); return true; } @@ -216,7 +216,7 @@ public class AdvancedNetworkTopology extends BasicNetworkTopology { if (result) { if (router.getState() == State.Stopped || router.getState() == State.Stopping) { - 
logger.debug("Router " + router.getInstanceName() + " is in " + router.getState() + ", so not sending NicPlugInOutRules command to the backend"); + logger.debug("Router {} is in {}, so not sending NicPlugInOutRules command to the backend", router, router.getState()); } else { _advancedVisitor.visit(nicPlugInOutRules); } @@ -230,7 +230,7 @@ public class AdvancedNetworkTopology extends BasicNetworkTopology { throws ResourceUnavailableException { if (rules == null || rules.isEmpty()) { - logger.debug("No network ACLs to be applied for network " + network.getId()); + logger.debug("No network ACLs to be applied for network {}", network); return true; } @@ -261,7 +261,7 @@ public class AdvancedNetworkTopology extends BasicNetworkTopology { if (router.getState() == State.Running) { result = bgpPeersRules.accept(_advancedVisitor, router); } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) { - logger.debug("Router " + router.getInstanceName() + " is in " + router.getState() + ", so not sending BgpPeer command to the backend"); + logger.debug("Router {} is in {}, so not sending BgpPeer command to the backend", router, router.getState()); } else { logger.warn("Unable to apply BgpPeer, virtual router is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to apply BgpPeer on the backend," + " virtual router is not in the right state", DataCenter.class, diff --git a/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkTopology.java b/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkTopology.java index 65d702b7138..a7000f702ec 100644 --- a/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkTopology.java +++ b/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkTopology.java @@ -192,7 +192,7 @@ public class BasicNetworkTopology implements NetworkTopology { throws ResourceUnavailableException { if (rules == null || 
rules.isEmpty()) { - logger.debug("No lb rules to be applied for network " + network.getId()); + logger.debug("No lb rules to be applied for network {}", network); return true; } @@ -212,7 +212,7 @@ public class BasicNetworkTopology implements NetworkTopology { public boolean applyFirewallRules(final Network network, final List rules, final VirtualRouter router) throws ResourceUnavailableException { if (rules == null || rules.isEmpty()) { - logger.debug("No firewall rules to be applied for network " + network.getId()); + logger.debug("No firewall rules to be applied for network {}", network); return true; } @@ -231,7 +231,7 @@ public class BasicNetworkTopology implements NetworkTopology { @Override public boolean applyStaticNats(final Network network, final List rules, final VirtualRouter router) throws ResourceUnavailableException { if (rules == null || rules.isEmpty()) { - logger.debug("No static nat rules to be applied for network " + network.getId()); + logger.debug("No static nat rules to be applied for network {}", network); return true; } @@ -251,7 +251,7 @@ public class BasicNetworkTopology implements NetworkTopology { public boolean associatePublicIP(final Network network, final List ipAddress, final VirtualRouter router) throws ResourceUnavailableException { if (ipAddress == null || ipAddress.isEmpty()) { - logger.debug("No ip association rules to be applied for network " + network.getId()); + logger.debug("No ip association rules to be applied for network {}", network); return true; } @@ -271,7 +271,7 @@ public class BasicNetworkTopology implements NetworkTopology { public String[] applyVpnUsers(final Network network, final List users, final List routers) throws ResourceUnavailableException { if (routers == null || routers.isEmpty()) { logger.warn("Failed to add/remove VPN users: no router found for account and zone"); - throw new ResourceUnavailableException("Unable to assign ip addresses, domR doesn't exist for network " + network.getId(), 
DataCenter.class, network.getDataCenterId()); + throw new ResourceUnavailableException(String.format("Unable to assign ip addresses, domR doesn't exist for network %s", network), DataCenter.class, network.getDataCenterId()); } logger.debug("APPLYING BASIC VPN RULES"); @@ -281,7 +281,7 @@ public class BasicNetworkTopology implements NetworkTopology { for (final DomainRouterVO router : routers) { if(router.getState() == State.Stopped || router.getState() == State.Stopping){ - logger.info("The router " + router.getInstanceName()+ " is in the " + router.getState() + " state. So not applying the VPN rules. Will be applied once the router gets restarted."); + logger.info("The router {} is in the {} state. So not applying the VPN rules. Will be applied once the router gets restarted.", router, router.getState()); continue; } else if (router.getState() != State.Running) { @@ -359,7 +359,7 @@ public class BasicNetworkTopology implements NetworkTopology { final boolean failWhenDisconnect, final RuleApplierWrapper ruleApplierWrapper) throws ResourceUnavailableException { if (router == null) { - logger.warn("Unable to apply " + typeString + ", virtual router doesn't exist in the network " + network.getId()); + logger.warn("Unable to apply {}, virtual router doesn't exist in the network {}", typeString, network); throw new ResourceUnavailableException("Unable to apply " + typeString, DataCenter.class, network.getDataCenterId()); } @@ -383,7 +383,7 @@ public class BasicNetworkTopology implements NetworkTopology { throw new ResourceUnavailableException("Unable to process due to the stop pending router " + router.getInstanceName() + " haven't been stopped after it's host coming back!", DataCenter.class, router.getDataCenterId()); } - logger.debug("Router " + router.getInstanceName() + " is stop pending, so not sending apply " + typeString + " commands to the backend"); + logger.debug("Router {} is stop pending, so not sending apply {} commands to the backend", router, typeString); 
return false; } @@ -391,7 +391,7 @@ public class BasicNetworkTopology implements NetworkTopology { result = ruleApplier.accept(getVisitor(), router); connectedRouters.add(router); } catch (final AgentUnavailableException e) { - logger.warn(msg + router.getInstanceName(), e); + logger.warn("{}{}", msg, router, e); disconnectedRouters.add(router); } @@ -405,7 +405,7 @@ public class BasicNetworkTopology implements NetworkTopology { } } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) { - logger.debug("Router " + router.getInstanceName() + " is in " + router.getState() + ", so not sending apply " + typeString + " commands to the backend"); + logger.debug("Router {} is in {}, so not sending apply {} commands to the backend", router, router.getState(), typeString); } else { logger.warn("Unable to apply " + typeString + ", virtual router is not in the right state " + router.getState()); if (isZoneBasic && isPodLevelException) { @@ -429,7 +429,7 @@ public class BasicNetworkTopology implements NetworkTopology { } } else if (!disconnectedRouters.isEmpty()) { if (logger.isDebugEnabled()) { - logger.debug(msg + router.getInstanceName() + "(" + router.getId() + ")"); + logger.debug("{}{}", msg, router); } if (isZoneBasic && isPodLevelException) { throw new ResourceUnavailableException(msg, Pod.class, podId); diff --git a/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java b/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java index 02600b87f29..e4481dab548 100644 --- a/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java @@ -29,6 +29,8 @@ import java.util.concurrent.TimeUnit; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.dc.dao.ClusterDao; +import 
com.cloud.dc.dao.DataCenterDao; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.response.OutOfBandManagementResponse; import org.apache.cloudstack.context.CallContext; @@ -73,9 +75,13 @@ import com.google.common.collect.ImmutableMap; @Component public class OutOfBandManagementServiceImpl extends ManagerBase implements OutOfBandManagementService, Manager, Configurable { + @Inject + private ClusterDao clusterDao; @Inject private ClusterDetailsDao clusterDetailsDao; @Inject + private DataCenterDao dataCenterDao; + @Inject private DataCenterDetailsDao dataCenterDetailsDao; @Inject private OutOfBandManagementDao outOfBandManagementDao; @@ -191,8 +197,8 @@ public class OutOfBandManagementServiceImpl extends ManagerBase implements OutOf if (sentCount != null && sentCount <= 0) { boolean concurrentUpdateResult = hostAlertCache.asMap().replace(host.getId(), sentCount, sentCount+1L); if (concurrentUpdateResult) { - final String subject = String.format("Out-of-band management auth-error detected for %s in cluster [id: %d] and zone [id: %d].", host, host.getClusterId(), host.getDataCenterId()); - logger.error(subject + ": " + message); + final String subject = String.format("Out-of-band management auth-error detected for %s in cluster [%s] and zone [%s].", host, clusterDao.findById(host.getClusterId()), dataCenterDao.findById(host.getDataCenterId())); + logger.error("{}: {}", subject, message); alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_OOBM_AUTH_ERROR, host.getDataCenterId(), host.getPodId(), subject, message); } } @@ -254,7 +260,7 @@ public class OutOfBandManagementServiceImpl extends ManagerBase implements OutOf Host host = hostDao.findById(hostId); if (host == null || host.getResourceState() == ResourceState.Degraded) { String state = host != null ? 
String.valueOf(host.getResourceState()) : null; - logger.debug(String.format("Host [id=%s, state=%s] was removed or placed in Degraded state by the Admin.", hostId, state)); + logger.debug("Host [id={}, uuid={}, state={}] was removed or placed in Degraded state by the Admin.", hostId, host != null ? host.getUuid() : "", state); return false; } @@ -474,7 +480,7 @@ public class OutOfBandManagementServiceImpl extends ManagerBase implements OutOf try { driverResponse = driver.execute(changePasswordCmd); } catch (Exception e) { - logger.error("Out-of-band management change password failed due to driver error: " + e.getMessage()); + logger.error("Out-of-band management change password for {} failed due to driver error: {}", host, e.getMessage()); throw new CloudRuntimeException(String.format("Failed to change out-of-band management password for %s due to driver error: %s", host, e.getMessage())); } diff --git a/server/src/main/java/org/apache/cloudstack/outofbandmanagement/PowerOperationTask.java b/server/src/main/java/org/apache/cloudstack/outofbandmanagement/PowerOperationTask.java index 487a11c9752..577164d4c0d 100644 --- a/server/src/main/java/org/apache/cloudstack/outofbandmanagement/PowerOperationTask.java +++ b/server/src/main/java/org/apache/cloudstack/outofbandmanagement/PowerOperationTask.java @@ -42,7 +42,7 @@ public class PowerOperationTask implements Runnable { @Override public String toString() { - return String.format("[OOBM Task] Power operation:%s on Host:%d(%s)", powerOperation, host.getId(), host.getName()); + return String.format("[OOBM Task] Power operation: %s on Host: %s", powerOperation, host); } @Override @@ -50,8 +50,7 @@ public class PowerOperationTask implements Runnable { try { service.executePowerOperation(host, powerOperation, null); } catch (Exception e) { - logger.warn(String.format("Out-of-band management background task operation=%s for host %s failed with: %s", - powerOperation.name(), host.getName(), e.getMessage())); + 
logger.warn("Out-of-band management background task operation={} for host {} failed with: {}", powerOperation.name(), host, e.getMessage()); String eventMessage = String .format("Error while issuing out-of-band management action %s for host: %s", powerOperation.name(), host.getName()); diff --git a/server/src/main/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java b/server/src/main/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java index 3680c869eb1..a448c612ece 100644 --- a/server/src/main/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java @@ -24,6 +24,7 @@ import java.util.Map; import javax.inject.Inject; +import com.cloud.dc.dao.DataCenterDao; import org.apache.cloudstack.acl.SecurityChecker; import org.apache.cloudstack.api.command.user.region.ha.gslb.AssignToGlobalLoadBalancerRuleCmd; import org.apache.cloudstack.api.command.user.region.ha.gslb.CreateGlobalLoadBalancerRuleCmd; @@ -79,6 +80,8 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR @Inject GlobalLoadBalancerLbRuleMapDao _gslbLbMapDao; @Inject + DataCenterDao zoneDao; + @Inject RegionDao _regionDao; @Inject RulesManager _rulesMgr; @@ -160,7 +163,7 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR } }); - logger.debug("successfully created new global load balancer rule for the account " + gslbOwner.getId()); + logger.debug("successfully created new global load balancer rule for the account {}", gslbOwner); return newGslbRule; } @@ -284,7 +287,7 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR // apply the gslb rule on to the back end gslb service providers on zones participating in gslb if (!applyGlobalLoadBalancerRuleConfig(gslbRuleId, false)) { - logger.warn("Failed to add load balancer rules " + newLbRuleIds + " 
to global load balancer rule id " + gslbRuleId); + logger.warn("Failed to add load balancer rules {} to global load balancer rule {}", newLbRuleIds, gslbRule); CloudRuntimeException ex = new CloudRuntimeException("Failed to add load balancer rules to GSLB rule "); throw ex; } @@ -387,7 +390,7 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR // apply the gslb rule on to the back end gslb service providers if (!applyGlobalLoadBalancerRuleConfig(gslbRuleId, false)) { - logger.warn("Failed to remove load balancer rules " + lbRuleIdsToremove + " from global load balancer rule id " + gslbRuleId); + logger.warn("Failed to remove load balancer rules {} from global load balancer rule {}", lbRuleIdsToremove, gslbRule); CloudRuntimeException ex = new CloudRuntimeException("Failed to remove load balancer rule ids from GSLB rule "); throw ex; } @@ -447,7 +450,7 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR if (gslbRule.getState() == com.cloud.region.ha.GlobalLoadBalancerRule.State.Staged) { if (logger.isDebugEnabled()) { - logger.debug("Rule Id: " + gslbRuleId + " is still in Staged state so just removing it."); + logger.debug("Rule: {} is still in Staged state so just removing it.", gslbRule); } _gslbRuleDao.remove(gslbRuleId); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_GLOBAL_LOAD_BALANCER_DELETE, gslbRule.getAccountId(), 0, gslbRule.getId(), gslbRule.getName(), @@ -542,7 +545,7 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR _gslbRuleDao.update(gslbRule.getId(), gslbRule); try { - logger.debug("Updating global load balancer with id " + gslbRule.getUuid()); + logger.debug("Updating global load balancer {}", gslbRule); // apply the gslb rule on to the back end gslb service providers on zones participating in gslb applyGlobalLoadBalancerRuleConfig(gslbRuleId, false); @@ -697,14 +700,14 @@ public class GlobalLoadBalancingRulesServiceImpl implements 
GlobalLoadBalancingR } @Override - public boolean revokeAllGslbRulesForAccount(com.cloud.user.Account caller, long accountId) throws com.cloud.exception.ResourceUnavailableException { - List gslbRules = _gslbRuleDao.listByAccount(accountId); + public boolean revokeAllGslbRulesForAccount(com.cloud.user.Account caller, Account account) throws com.cloud.exception.ResourceUnavailableException { + List gslbRules = _gslbRuleDao.listByAccount(account.getId()); if (gslbRules != null && !gslbRules.isEmpty()) { for (GlobalLoadBalancerRule gslbRule : gslbRules) { revokeGslbRule(gslbRule.getId(), caller); } } - logger.debug("Successfully cleaned up GSLB rules for account id=" + accountId); + logger.debug("Successfully cleaned up GSLB rules for account {}", account); return true; } diff --git a/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java b/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java index 64d9b3467e3..70808458b3c 100644 --- a/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java +++ b/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java @@ -92,30 +92,29 @@ public class SnapshotHelper { * @param snapInfo the snapshot info to delete. */ public void expungeTemporarySnapshot(boolean kvmSnapshotOnlyInPrimaryStorage, SnapshotInfo snapInfo) { - if (!kvmSnapshotOnlyInPrimaryStorage) { - if (snapInfo != null) { - logger.trace(String.format("Snapshot [%s] is not a temporary backup to create a volume from snapshot. Not expunging it.", snapInfo.getId())); - } - return; - } - if (snapInfo == null) { logger.warn("Unable to expunge snapshot due to its info is null."); return; } + if (!kvmSnapshotOnlyInPrimaryStorage) { + logger.trace("Snapshot [{}] is not a temporary backup to create a volume from snapshot. 
Not expunging it.", snapInfo.getSnapshotVO()); + return; + } + if (!DataStoreRole.Image.equals(snapInfo.getDataStore().getRole())) { - logger.debug(String.format("Expunge template for Snapshot [%s] is called for primary storage role. Not expunging it, " + - "but we will still expunge the database reference of the snapshot for image storage role if any", snapInfo.getId())); + logger.debug("Expunge template for Snapshot [{}] is called for primary storage role. Not expunging it, " + + "but we will still expunge the database reference of the snapshot for image storage role if any", snapInfo.getSnapshotVO()); } else { - logger.debug(String.format("Expunging snapshot [%s] due to it is a temporary backup to create a volume from snapshot. It is occurring because the global setting [%s]" - + " has the value [%s].", snapInfo.getId(), SnapshotInfo.BackupSnapshotAfterTakingSnapshot.key(), backupSnapshotAfterTakingSnapshot)); + logger.debug("Expunging snapshot [{}] due to it is a temporary backup to create a volume from snapshot." + + " It is occurring because the global setting [{}] has the value [{}].", + snapInfo.getSnapshotVO(), SnapshotInfo.BackupSnapshotAfterTakingSnapshot.key(), backupSnapshotAfterTakingSnapshot); try { snapshotService.deleteSnapshot(snapInfo); } catch (CloudRuntimeException ex) { - logger.warn(String.format("Unable to delete the temporary snapshot [%s] on secondary storage due to [%s]. We still will expunge the database reference, consider" - + " manually deleting the file [%s].", snapInfo.getId(), ex.getMessage(), snapInfo.getPath()), ex); + logger.warn("Unable to delete the temporary snapshot [{}] on secondary storage due to [{}]. 
We still will expunge the database reference, consider" + + " manually deleting the file [{}].", snapInfo, ex.getMessage(), snapInfo.getPath(), ex); } } @@ -136,7 +135,7 @@ public class SnapshotHelper { public SnapshotInfo backupSnapshotToSecondaryStorageIfNotExists(SnapshotInfo snapInfo, DataStoreRole dataStoreRole, Snapshot snapshot, boolean kvmSnapshotOnlyInPrimaryStorage) throws CloudRuntimeException { if (!isSnapshotBackupable(snapInfo, dataStoreRole, kvmSnapshotOnlyInPrimaryStorage)) { logger.trace(String.format("Snapshot [%s] is already on secondary storage or is not a KVM snapshot that is only kept in primary storage. Therefore, we do not back it up." - + " up.", snapInfo.getId())); + + " up.", snapInfo.getSnapshotVO())); return snapInfo; } diff --git a/server/src/main/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelper.java b/server/src/main/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelper.java index 267d813364b..21a34de0d23 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelper.java +++ b/server/src/main/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelper.java @@ -17,6 +17,7 @@ package org.apache.cloudstack.storage.heuristics; import com.cloud.api.ApiDBUtils; +import com.cloud.dc.dao.DataCenterDao; import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; import com.cloud.storage.StorageManager; @@ -74,6 +75,9 @@ public class HeuristicRuleHelper { @Inject private AccountDao accountDao; + @Inject + private DataCenterDao zoneDao; + /** * Returns the {@link DataStore} object if the zone, specified by the ID, has an active heuristic rule for the given {@link HeuristicType}. * It returns null otherwise. 
@@ -87,10 +91,10 @@ public class HeuristicRuleHelper { HeuristicVO heuristicsVO = secondaryStorageHeuristicDao.findByZoneIdAndType(zoneId, heuristicType); if (heuristicsVO == null) { - logger.debug(String.format("No heuristic rules found for zone with ID [%s] and heuristic type [%s]. Returning null.", zoneId, heuristicType)); + logger.debug("No heuristic rules found for zone [{}] and heuristic type [{}]. Returning null.", () -> zoneDao.findById(zoneId), heuristicType::toString); return null; } else { - logger.debug(String.format("Found the heuristic rule %s to apply for zone with ID [%s].", heuristicsVO, zoneId)); + logger.debug("Found the heuristic rule {} to apply for zone [{}].", heuristicsVO::toString, () -> zoneDao.findById(zoneId)); return interpretHeuristicRule(heuristicsVO.getHeuristicRule(), heuristicType, obj, zoneId); } } diff --git a/server/src/main/java/org/apache/cloudstack/storage/heuristics/presetvariables/GenericHeuristicPresetVariable.java b/server/src/main/java/org/apache/cloudstack/storage/heuristics/presetvariables/GenericHeuristicPresetVariable.java index f8ded3a864a..28d4327954e 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/heuristics/presetvariables/GenericHeuristicPresetVariable.java +++ b/server/src/main/java/org/apache/cloudstack/storage/heuristics/presetvariables/GenericHeuristicPresetVariable.java @@ -38,6 +38,8 @@ public class GenericHeuristicPresetVariable { @Override public String toString() { - return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, fieldNamesToIncludeInToString.toArray(new String[0])); + return String.format("GenericHeuristicPresetVariable %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, fieldNamesToIncludeInToString.toArray(new String[0]))); } } diff --git a/server/src/main/java/org/apache/cloudstack/storage/object/BucketApiServiceImpl.java b/server/src/main/java/org/apache/cloudstack/storage/object/BucketApiServiceImpl.java index 58b41d6a55d..389ca52b03b 
100644 --- a/server/src/main/java/org/apache/cloudstack/storage/object/BucketApiServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/storage/object/BucketApiServiceImpl.java @@ -117,7 +117,7 @@ public class BucketApiServiceImpl extends ManagerBase implements BucketApiServic ObjectStoreEntity objectStore = (ObjectStoreEntity)_dataStoreMgr.getDataStore(objectStoreVO.getId(), DataStoreRole.Object); try { if(!objectStore.createUser(ownerId)) { - logger.error("Failed to create user in objectstore "+ objectStore.getName()); + logger.error("Failed to create user in objectstore {}", objectStore); return null; } } catch (CloudRuntimeException e) { diff --git a/server/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSServiceImpl.java b/server/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSServiceImpl.java index 072f7d4cd3e..4f0aabd3f37 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSServiceImpl.java @@ -179,8 +179,8 @@ public class SharedFSServiceImpl extends ManagerBase implements SharedFSService, try { return sharedFSStateMachine.transitTo(sharedFS, event, null, sharedFSDao); } catch (NoTransitionException e) { - String message = String.format("State transit error for Shared FileSystem %s [%s] due to exception: %s.", - sharedFS.getName(), sharedFS.getId(), e.getMessage()); + String message = String.format("State transit error for Shared FileSystem %s due to exception: %s.", + sharedFS, e.getMessage()); logger.error(message, e); throw new CloudRuntimeException(message, e); } @@ -706,7 +706,7 @@ public class SharedFSServiceImpl extends ManagerBase implements SharedFSService, deleteSharedFS(sharedFS.getId()); } catch (Exception e) { stateTransitTo(sharedFS, Event.OperationFailed); - logger.error(String.format("Unable to expunge Shared FileSystem [%s] due to: [%s].", sharedFS.getUuid(), e.getMessage())); + 
logger.error("Unable to expunge Shared FileSystem {} due to: [{}].", sharedFS, e.getMessage()); } } } finally { diff --git a/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java b/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java index 6a34ca2d0e5..ef0f6f6b226 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java @@ -268,18 +268,18 @@ public class VnfTemplateManagerImpl extends ManagerBase implements VnfTemplateMa continue; } if (!networkModel.areServicesSupportedInNetwork(network.getId(), Network.Service.StaticNat)) { - logger.info(String.format("Network ID: %s does not support static nat, " + - "skipping this network configuration for VNF appliance", network.getUuid())); + logger.info("Network: {} does not support static nat, " + + "skipping this network configuration for VNF appliance", network); continue; } if (network.getVpcId() != null) { - logger.info(String.format("Network ID: %s is a VPC tier, " + - "skipping this network configuration for VNF appliance", network.getUuid())); + logger.info("Network: {} is a VPC tier, " + + "skipping this network configuration for VNF appliance", network); continue; } if (!networkModel.areServicesSupportedInNetwork(network.getId(), Network.Service.Firewall)) { - logger.info(String.format("Network ID: %s does not support firewall, " + - "skipping this network configuration for VNF appliance", network.getUuid())); + logger.info("Network: {} does not support firewall, " + + "skipping this network configuration for VNF appliance", network); continue; } networkAndIpMap.put(network, nic.getIPv4Address()); @@ -326,7 +326,7 @@ public class VnfTemplateManagerImpl extends ManagerBase implements VnfTemplateMa Set ports = getOpenPortsForVnfAppliance(template); for (Map.Entry entry : networkAndIpMap.entrySet()) { Network 
network = entry.getKey(); - logger.debug("Creating network rules for VNF appliance on isolated network " + network.getUuid()); + logger.debug("Creating network rules for VNF appliance on isolated network {}", network); String ip = entry.getValue(); IpAddress publicIp = networkService.allocateIP(owner, zone.getId(), network.getId(), null, null); if (publicIp == null) { @@ -367,7 +367,7 @@ public class VnfTemplateManagerImpl extends ManagerBase implements VnfTemplateMa }); firewallService.applyIngressFwRules(publicIp.getId(), owner); } - logger.debug("Created network rules for VNF appliance on isolated network " + network.getUuid()); + logger.debug("Created network rules for VNF appliance on isolated network {}", network); } } } diff --git a/server/src/main/java/org/apache/cloudstack/storage/volume/VolumeImportUnmanageManagerImpl.java b/server/src/main/java/org/apache/cloudstack/storage/volume/VolumeImportUnmanageManagerImpl.java index a8167315143..aac5d1277a6 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/volume/VolumeImportUnmanageManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/storage/volume/VolumeImportUnmanageManagerImpl.java @@ -207,7 +207,7 @@ public class VolumeImportUnmanageManagerImpl implements VolumeImportUnmanageServ volumeApiService.validateCustomDiskOfferingSizeRange(volume.getVirtualSize() / ByteScaleUtils.GiB); } if (!volumeApiService.doesTargetStorageSupportDiskOffering(pool, diskOffering.getTags())) { - logFailureAndThrowException(String.format("Disk offering: %s storage tags are not compatible with selected storage pool: %s", diskOffering.getUuid(), pool.getUuid())); + logFailureAndThrowException(String.format("Disk offering: %s storage tags are not compatible with selected storage pool: %s", diskOffering, pool)); } // 7. 
create records @@ -249,7 +249,7 @@ public class VolumeImportUnmanageManagerImpl implements VolumeImportUnmanageServ GetVolumesOnStorageCommand command = new GetVolumesOnStorageCommand(storageTO, volumePath, keyword); Answer answer = agentManager.easySend(host.getId(), command); if (answer == null || !(answer instanceof GetVolumesOnStorageAnswer)) { - logFailureAndThrowException("Cannot get volumes on storage pool via host " + host.getName()); + logFailureAndThrowException(String.format("Cannot get volumes on storage pool via host %s", host)); } if (!answer.getResult()) { logFailureAndThrowException("Volume cannot be imported due to " + answer.getDetails()); @@ -286,10 +286,10 @@ public class VolumeImportUnmanageManagerImpl implements VolumeImportUnmanageServ logFailureAndThrowException(String.format("Storage pool (ID: %s) does not exist", poolId)); } if (pool.isInMaintenance()) { - logFailureAndThrowException(String.format("Storage pool (name: %s) is in maintenance", pool.getName())); + logFailureAndThrowException(String.format("Storage pool %s is in maintenance", pool)); } if (!StoragePoolStatus.Up.equals(pool.getStatus())) { - logFailureAndThrowException(String.format("Storage pool (ID: %s) is not Up: %s", pool.getName(), pool.getStatus())); + logFailureAndThrowException(String.format("Storage pool %s is not Up: %s", pool, pool.getStatus())); } return pool; } @@ -298,7 +298,7 @@ public class VolumeImportUnmanageManagerImpl implements VolumeImportUnmanageServ List hosts = new ArrayList<>(); switch (pool.getScope()) { case HOST: - return findHostAndLocalPathForVolumeImportForHostScope(pool.getId()); + return findHostAndLocalPathForVolumeImportForHostScope(pool); case CLUSTER: hosts = hostDao.findHypervisorHostInCluster((pool.getClusterId())); break; @@ -316,8 +316,8 @@ public class VolumeImportUnmanageManagerImpl implements VolumeImportUnmanageServ return null; } - private Pair findHostAndLocalPathForVolumeImportForHostScope(Long poolId) { - List storagePoolHostVOs 
= storagePoolHostDao.listByPoolId(poolId); + private Pair findHostAndLocalPathForVolumeImportForHostScope(StoragePoolVO pool) { + List storagePoolHostVOs = storagePoolHostDao.listByPoolId(pool.getId()); if (CollectionUtils.isNotEmpty(storagePoolHostVOs)) { for (StoragePoolHostVO storagePoolHostVO : storagePoolHostVOs) { HostVO host = hostDao.findById(storagePoolHostVO.getHostId()); @@ -326,7 +326,7 @@ public class VolumeImportUnmanageManagerImpl implements VolumeImportUnmanageServ } } } - logFailureAndThrowException("No host found to perform volume import on pool: " + poolId); + logFailureAndThrowException(String.format("No host found to perform volume import on pool: %s", pool)); return null; } @@ -408,20 +408,20 @@ public class VolumeImportUnmanageManagerImpl implements VolumeImportUnmanageServ logFailureAndThrowException(String.format("Disk offering %s does not exist", diskOfferingId)); } if (!DiskOffering.State.Active.equals(diskOfferingVO.getState())) { - logFailureAndThrowException(String.format("Disk offering with ID %s is not active", diskOfferingId)); + logFailureAndThrowException(String.format("Disk offering %s is not active", diskOfferingVO)); } if (diskOfferingVO.isUseLocalStorage() != isLocal) { - logFailureAndThrowException(String.format("Disk offering with ID %s should use %s storage", diskOfferingId, isLocal ? "local": "shared")); + logFailureAndThrowException(String.format("Disk offering %s should use %s storage", diskOfferingVO, isLocal ? 
"local": "shared")); } if (diskOfferingVO.getEncrypt()) { - logFailureAndThrowException(String.format("Disk offering with ID %s should not support volume encryption", diskOfferingId)); + logFailureAndThrowException(String.format("Disk offering %s should not support volume encryption", diskOfferingVO)); } // check if disk offering is accessible by the account/owner try { configMgr.checkDiskOfferingAccess(owner, diskOfferingVO, dcDao.findById(zoneId)); return diskOfferingVO; } catch (PermissionDeniedException ex) { - logFailureAndThrowException(String.format("Disk offering with ID %s is not accessible by owner %s", diskOfferingId, owner)); + logFailureAndThrowException(String.format("Disk offering %s is not accessible by owner %s", diskOfferingVO, owner)); } } return getOrCreateDefaultDiskOfferingIdForVolumeImport(isLocal); @@ -462,7 +462,7 @@ public class VolumeImportUnmanageManagerImpl implements VolumeImportUnmanageServ resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.volume); resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.primary_storage, volumeSize); } catch (ResourceAllocationException e) { - logger.error(String.format("VM resource allocation error for account: %s", owner.getUuid()), e); + logger.error("VM resource allocation error for account: {}", owner, e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM resource allocation error for account: %s. 
%s", owner.getUuid(), StringUtils.defaultString(e.getMessage()))); } } @@ -482,7 +482,7 @@ public class VolumeImportUnmanageManagerImpl implements VolumeImportUnmanageServ volumeVO.getId(), volumeVO.getName(), volumeVO.getDiskOfferingId(), null, volumeVO.getSize(), Volume.class.getName(), volumeVO.getUuid(), volumeVO.isDisplayVolume()); } catch (Exception e) { - logger.error(String.format("Failed to publish volume ID: %s event or usage records during volume import/unmanage", volumeVO.getUuid()), e); + logger.error("Failed to publish volume: {} event or usage records during volume import/unmanage", volumeVO, e); } } @@ -497,13 +497,13 @@ public class VolumeImportUnmanageManagerImpl implements VolumeImportUnmanageServ logFailureAndThrowException(String.format("Volume (ID: %s) does not exist", volumeId)); } if (!Volume.State.Ready.equals(volumeVO.getState())) { - logFailureAndThrowException(String.format("Volume (ID: %s) is not ready", volumeId)); + logFailureAndThrowException(String.format("Volume %s is not ready", volumeVO)); } if (volumeVO.getEncryptFormat() != null) { - logFailureAndThrowException(String.format("Volume (ID: %s) is encrypted", volumeId)); + logFailureAndThrowException(String.format("Volume %s is encrypted", volumeVO)); } if (volumeVO.getAttached() != null || volumeVO.getInstanceId() != null) { - logFailureAndThrowException(String.format("Volume (ID: %s) is attached to VM (ID: %s)", volumeId, volumeVO.getInstanceId())); + logFailureAndThrowException(String.format("Volume %s is attached to VM (ID: %s)", volumeVO, volumeVO.getInstanceId())); } return volumeVO; } diff --git a/server/src/main/java/org/apache/cloudstack/user/UserPasswordResetManagerImpl.java b/server/src/main/java/org/apache/cloudstack/user/UserPasswordResetManagerImpl.java index f35f69fb8bf..6574489c827 100644 --- a/server/src/main/java/org/apache/cloudstack/user/UserPasswordResetManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/user/UserPasswordResetManagerImpl.java @@ 
-192,11 +192,8 @@ public class UserPasswordResetManagerImpl extends ManagerBase implements UserPas mailProperties.setRecipients(addresses); mailSender.sendMail(mailProperties); - logger.debug(String.format( - "User password reset email for user id: %d username: %s account id: %d" + - " domain id:%d sent to %s with token expiry at %s", - userAccount.getId(), username, userAccount.getAccountId(), - userAccount.getDomainId(), email, resetTokenExpiryTime)); + logger.debug("User password reset email for user {} account id: {} domain id: {} sent to {} with token expiry at {}", + userAccount, userAccount.getAccountId(), userAccount.getDomainId(), email, resetTokenExpiryTime); } @Override @@ -205,10 +202,8 @@ public class UserPasswordResetManagerImpl extends ManagerBase implements UserPas UserDetailVO resetTokenExpiryDate = userDetailsDao.findDetail(user.getId(), PasswordResetTokenExpiryDate); if (resetTokenDetail == null || resetTokenExpiryDate == null) { - logger.debug(String.format( - "Failed to reset password. No reset token found for user id: %d username: %s account" + - " id: %d domain id: %d", - user.getId(), user.getUsername(), user.getAccountId(), user.getDomainId())); + logger.debug("Failed to reset password. No reset token found for user {} account" + + " id: {} domain id: {}", user, user.getAccountId(), user.getDomainId()); throw new ServerApiException(ApiErrorCode.PARAM_ERROR, String.format("No reset token found for user %s", user.getUsername())); } @@ -217,31 +212,23 @@ public class UserPasswordResetManagerImpl extends ManagerBase implements UserPas Date now = new Date(); String resetToken = resetTokenDetail.getValue(); if (StringUtils.isEmpty(resetToken)) { - logger.debug(String.format( - "Failed to reset password. No reset token found for user id: %d username: %s account" + - " id: %d domain id: %d", - user.getId(), user.getUsername(), user.getAccountId(), user.getDomainId())); + logger.debug("Failed to reset password. 
No reset token found for user {} account" + + " id: {} domain id: {}", user, user.getAccountId(), user.getDomainId()); throw new ServerApiException(ApiErrorCode.PARAM_ERROR, String.format("No reset token found for user %s", user.getUsername())); } if (!resetToken.equals(token)) { - logger.debug(String.format( - "Failed to reset password. Invalid reset token for user id: %d username: %s " + - "account id: %d domain id: %d", - user.getId(), user.getUsername(), user.getAccountId(), user.getDomainId())); + logger.debug("Failed to reset password. Invalid reset token for user {} " + + "account id: {} domain id: {}", user, user.getAccountId(), user.getDomainId()); throw new ServerApiException(ApiErrorCode.PARAM_ERROR, String.format("Invalid reset token for user %s", user.getUsername())); } if (now.after(resetTokenExpiryTime)) { - logger.debug(String.format( - "Failed to reset password. Reset token has expired for user id: %d username: %s " + - "account id: %d domain id: %d", - user.getId(), user.getUsername(), user.getAccountId(), user.getDomainId())); + logger.debug("Failed to reset password. 
Reset token has expired for user {} " + + "account id: {} domain id: {}", user, user.getAccountId(), user.getDomainId()); throw new ServerApiException(ApiErrorCode.PARAM_ERROR, String.format("Reset token has expired for user %s", user.getUsername())); } resetPassword(user, password); - logger.debug(String.format( - "Password reset successful for user id: %d username: %s account id: %d domain id: %d", - user.getId(), user.getUsername(), user.getAccountId(), user.getDomainId())); + logger.debug("Password reset successful for user {} account id: {} domain id: {}", user, user.getAccountId(), user.getDomainId()); return true; } diff --git a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java index 13919b04f61..d8b6acccd03 100644 --- a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java @@ -426,7 +426,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { } } } catch (Exception e) { - logger.warn(String.format("Unable to find volume file name for volume ID: %s while adding filters unmanaged VMs", volumeVO.getUuid()), e); + logger.warn("Unable to find volume file name for volume: {} while adding filters unmanaged VMs", volumeVO, e); } if (!volumeFileNames.isEmpty()) { additionalNameFilter.addAll(volumeFileNames); @@ -488,7 +488,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { try { cpuSpeed = Integer.parseInt(details.get(VmDetailConstants.CPU_SPEED)); } catch (Exception e) { - logger.error(String.format("Failed to get CPU speed for importing VM [%s] due to [%s].", instance.getName(), e.getMessage()), e); + logger.error("Failed to get CPU speed for importing VM [{}] due to [{}].", instance, e.getMessage(), e); } } Map parameters = new HashMap<>(); @@ -890,7 +890,7 @@ public class UnmanagedVMsManagerImpl implements 
UnmanagedVMsManager { try { networkOrchestrationService.release(profile, true); } catch (Exception e) { - logger.error(String.format("Unable to release NICs for unsuccessful import unmanaged VM: %s", userVm.getInstanceName()), e); + logger.error("Unable to release NICs for unsuccessful import unmanaged VM: {}", userVm, e); nicDao.removeNicsForInstance(userVm.getId()); } // Remove vm @@ -904,12 +904,12 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to check migrations need during VM import")); } if (sourceHost == null || serviceOffering == null || diskProfileStoragePoolList == null) { - logger.error(String.format("Failed to check migrations need during import, VM: %s", userVm.getInstanceName())); + logger.error(String.format("Failed to check migrations need during import, VM: %s", userVm)); cleanupFailedImportVM(vm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to check migrations need during import, VM: %s", userVm.getInstanceName())); } if (!hostSupportsServiceOfferingAndTemplate(sourceHost, serviceOffering, template)) { - logger.debug(String.format("VM %s needs to be migrated", vm.getUuid())); + logger.debug("VM {} needs to be migrated", vm); final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm, template, serviceOffering, owner, null); profile.setServiceOffering(serviceOfferingDao.findById(vm.getId(), serviceOffering.getId())); DeploymentPlanner.ExcludeList excludeList = new DeploymentPlanner.ExcludeList(); @@ -919,7 +919,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { try { dest = deploymentPlanningManager.planDeployment(profile, plan, excludeList, null); } catch (Exception e) { - String errorMsg = String.format("VM import failed for Unmanaged VM [%s] during VM migration, cannot find deployment destination due to [%s].", vm.getInstanceName(), e.getMessage()); + String errorMsg = 
String.format("VM import failed for Unmanaged VM [%s] during VM migration, cannot find deployment destination due to [%s].", vm, e.getMessage()); logger.warn(errorMsg, e); cleanupFailedImportVM(vm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMsg); @@ -939,7 +939,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { } vm = userVmManager.getUserVm(vm.getId()); } catch (Exception e) { - String errorMsg = String.format("VM import failed for Unmanaged VM [%s] during VM migration due to [%s].", vm.getInstanceName(), e.getMessage()); + String errorMsg = String.format("VM import failed for Unmanaged VM [%s] during VM migration due to [%s].", vm, e.getMessage()); logger.error(errorMsg, e); cleanupFailedImportVM(vm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMsg); @@ -964,7 +964,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { if (poolSupportsOfferings) { continue; } - logger.debug(String.format("Volume %s needs to be migrated", volumeVO.getUuid())); + logger.debug("Volume {} needs to be migrated", volumeVO); Pair, List> poolsPair = managementService.listStoragePoolsForSystemMigrationOfVolume(profile.getVolumeId(), null, null, null, null, false, true); if (CollectionUtils.isEmpty(poolsPair.first()) && CollectionUtils.isEmpty(poolsPair.second())) { cleanupFailedImportVM(vm); @@ -998,7 +998,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { cleanupFailedImportVM(vm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm: %s during volume ID: %s migration as no suitable pool found", userVm.getInstanceName(), volumeVO.getUuid())); } else { - logger.debug(String.format("Found storage pool %s(%s) for migrating the volume %s to", storagePool.getName(), storagePool.getUuid(), volumeVO.getUuid())); + logger.debug("Found storage pool {} for migrating the volume {} to", storagePool, volumeVO); } try { Volume volume = null; 
@@ -1010,15 +1010,15 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { if (volume == null) { String msg = ""; if (vm.getState().equals(VirtualMachine.State.Running)) { - msg = String.format("Live migration for volume ID: %s to destination pool ID: %s failed", volumeVO.getUuid(), storagePool.getUuid()); + msg = String.format("Live migration for volume: %s to destination pool: %s failed", volumeVO, storagePool); } else { - msg = String.format("Migration for volume ID: %s to destination pool ID: %s failed", volumeVO.getUuid(), storagePool.getUuid()); + msg = String.format("Migration for volume: %s to destination pool: %s failed", volumeVO, storagePool); } logger.error(msg); throw new CloudRuntimeException(msg); } } catch (Exception e) { - logger.error(String.format("VM import failed for unmanaged vm: %s during volume migration", vm.getInstanceName()), e); + logger.error("VM import failed for unmanaged vm: {} during volume migration", vm, e); cleanupFailedImportVM(vm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm: %s during volume migration. 
%s", userVm.getInstanceName(), StringUtils.defaultString(e.getMessage()))); } @@ -1028,7 +1028,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { private void publishVMUsageUpdateResourceCount(final UserVm userVm, ServiceOfferingVO serviceOfferingVO, VirtualMachineTemplate templateVO) { if (userVm == null || serviceOfferingVO == null) { - logger.error(String.format("Failed to publish usage records during VM import because VM [%s] or ServiceOffering [%s] is null.", userVm, serviceOfferingVO)); + logger.error("Failed to publish usage records during VM import because VM [{}] or ServiceOffering [{}] is null.", userVm, serviceOfferingVO); cleanupFailedImportVM(userVm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "VM import failed for Unmanaged VM during publishing Usage Records."); } @@ -1045,7 +1045,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { userVm.getHypervisorType().toString(), VirtualMachine.class.getName(), userVm.getUuid(), userVm.isDisplayVm()); } } catch (Exception e) { - logger.error(String.format("Failed to publish usage records during VM import for unmanaged VM [%s] due to [%s].", userVm.getInstanceName(), e.getMessage()), e); + logger.error("Failed to publish usage records during VM import for unmanaged VM [{}] due to [{}].", userVm, e.getMessage(), e); cleanupFailedImportVM(userVm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm %s during publishing usage records", userVm.getInstanceName())); } @@ -1057,7 +1057,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), volume.getDiskOfferingId(), null, volume.getSize(), Volume.class.getName(), volume.getUuid(), volume.isDisplayVolume()); } catch (Exception e) { - logger.error(String.format("Failed to publish volume 
ID: %s usage records during VM import", volume.getUuid()), e); + logger.error("Failed to publish volume ID: {} usage records during VM import", volume, e); } resourceLimitService.incrementVolumeResourceCount(userVm.getAccountId(), volume.isDisplayVolume(), volume.getSize(), diskOfferingDao.findById(volume.getDiskOfferingId())); @@ -1121,7 +1121,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { try { validatedServiceOffering = getUnmanagedInstanceServiceOffering(unmanagedInstance, serviceOffering, owner, zone, details, cluster.getHypervisorType()); } catch (Exception e) { - String errorMsg = String.format("Failed to import Unmanaged VM [%s] because the service offering [%s] is not compatible due to [%s].", unmanagedInstance.getName(), serviceOffering.getUuid(), StringUtils.defaultIfEmpty(e.getMessage(), "")); + String errorMsg = String.format("Failed to import Unmanaged VM [%s] because the service offering [%s] is not compatible due to [%s].", unmanagedInstance, serviceOffering, StringUtils.defaultIfEmpty(e.getMessage(), "")); logger.error(errorMsg, e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMsg); } @@ -1174,7 +1174,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { } checkUnmanagedDiskLimits(owner, rootDisk, serviceOffering, dataDisks, dataDiskOfferingMap); } catch (ResourceAllocationException e) { - logger.error(String.format("Volume resource allocation error for owner: %s", owner.getUuid()), e); + logger.error("Volume resource allocation error for owner: {}", owner, e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Volume resource allocation error for owner: %s. 
%s", owner.getUuid(), StringUtils.defaultString(e.getMessage()))); } // Check NICs and supplied networks @@ -1462,7 +1462,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { try { resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.user_vm, 1); } catch (ResourceAllocationException e) { - logger.error(String.format("VM resource allocation error for account: %s", owner.getUuid()), e); + logger.error("VM resource allocation error for account: {}", owner, e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM resource allocation error for account: %s. %s", owner.getUuid(), StringUtils.defaultString(e.getMessage()))); } } @@ -1643,8 +1643,8 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { HostVO convertHost = selectKVMHostForConversionInCluster(destinationCluster, convertInstanceHostId); HostVO importHost = selectKVMHostForImportingInCluster(destinationCluster, importInstanceHostId); CheckConvertInstanceAnswer conversionSupportAnswer = checkConversionSupportOnHost(convertHost, sourceVMName, false); - logger.debug(String.format("The host %s (%s) is selected to execute the conversion of the instance %s" + - " from VMware to KVM ", convertHost.getId(), convertHost.getName(), sourceVMName)); + logger.debug("The host {} is selected to execute the conversion of the " + + "instance {} from VMware to KVM ", convertHost, sourceVMName); temporaryConvertLocation = selectInstanceConversionTemporaryLocation( destinationCluster, convertHost, convertStoragePoolId); @@ -1741,8 +1741,8 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { private void checkUnmanagedNicAndNetworkMacAddressForImport(NetworkVO network, UnmanagedInstanceTO.Nic nic, boolean forced) { NicVO existingNic = nicDao.findByNetworkIdAndMacAddress(network.getId(), nic.getMacAddress()); if (existingNic != null && !forced) { - String err = String.format("NIC with MAC address %s already exists on network with ID 
%s and forced flag is disabled. " + - "Retry with forced flag enabled if a new MAC address to be generated.", nic.getMacAddress(), network.getUuid()); + String err = String.format("NIC %s with MAC address %s already exists on network %s and forced flag is disabled. " + + "Retry with forced flag enabled if a new MAC address to be generated.", nic, nic.getMacAddress(), network); logger.error(err); throw new CloudRuntimeException(err); } @@ -1841,19 +1841,19 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { } else if (selectedHost.getResourceState() != ResourceState.Enabled) { err = String.format( "Cannot import the converted instance on the host %s as it is not in Enabled state", - selectedHost.getName()); + selectedHost); } else if (selectedHost.getStatus() != Status.Up) { err = String.format( "Cannot import the converted instance on the host %s as it is not running", - selectedHost.getName()); + selectedHost); } else if (selectedHost.getType() != Host.Type.Routing) { err = String.format( "Cannot import the converted instance on the host %s as it is not a routing host", - selectedHost.getName()); + selectedHost); } else if (destinationCluster.getId() != selectedHost.getClusterId()) { err = String.format( "Cannot import the converted instance on the host %s as it is not in the same cluster as the destination cluster", - selectedHost.getName()); + selectedHost); } if (err != null) { @@ -1870,7 +1870,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { String err = String.format( "Could not find any suitable %s host in cluster %s to import the converted instance", - destinationCluster.getHypervisorType(), destinationCluster.getName()); + destinationCluster.getHypervisorType(), destinationCluster); logger.error(err); throw new CloudRuntimeException(err); } @@ -1885,19 +1885,19 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { } else if (!List.of(ResourceState.Enabled, 
ResourceState.Disabled).contains(selectedHost.getResourceState())) { err = String.format( "Cannot perform the conversion on the host %s as the host is in %s state", - selectedHost.getName(), selectedHost.getResourceState()); + selectedHost, selectedHost.getResourceState()); } else if (selectedHost.getStatus() != Status.Up) { err = String.format( "Cannot perform the conversion on the host %s as it is not running", - selectedHost.getName()); + selectedHost); } else if (selectedHost.getType() != Host.Type.Routing) { err = String.format( "Cannot perform the conversion on the host %s as it is not a routing host", - selectedHost.getName()); + selectedHost); } else if (destinationCluster.getDataCenterId() != selectedHost.getDataCenterId()) { err = String.format( "Cannot perform the conversion on the host %s as it is not in the same zone as the destination cluster", - selectedHost.getName()); + selectedHost); } if (err != null) { logger.error(err); @@ -1919,13 +1919,13 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { } String err = String.format("Could not find any suitable %s host in cluster %s to perform the instance conversion", - destinationCluster.getHypervisorType(), destinationCluster.getName()); + destinationCluster.getHypervisorType(), destinationCluster); logger.error(err); throw new CloudRuntimeException(err); } private CheckConvertInstanceAnswer checkConversionSupportOnHost(HostVO convertHost, String sourceVM, boolean checkWindowsGuestConversionSupport) { - logger.debug(String.format("Checking the %s conversion support on the host %s (%s)", checkWindowsGuestConversionSupport? "windows guest" : "", convertHost.getId(), convertHost.getName())); + logger.debug(String.format("Checking the %s conversion support on the host %s", checkWindowsGuestConversionSupport? 
"windows guest" : "", convertHost)); CheckConvertInstanceCommand cmd = new CheckConvertInstanceCommand(checkWindowsGuestConversionSupport); int timeoutSeconds = 60; cmd.setWait(timeoutSeconds); @@ -1935,14 +1935,14 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { checkConvertInstanceAnswer = (CheckConvertInstanceAnswer) agentManager.send(convertHost.getId(), cmd); } catch (AgentUnavailableException | OperationTimedoutException e) { String err = String.format("Failed to check %s conversion support on the host %s for converting instance %s from VMware to KVM due to: %s", - checkWindowsGuestConversionSupport? "windows guest" : "", convertHost.getName(), sourceVM, e.getMessage()); + checkWindowsGuestConversionSupport? "windows guest" : "", convertHost, sourceVM, e.getMessage()); logger.error(err); throw new CloudRuntimeException(err); } if (!checkConvertInstanceAnswer.getResult()) { String err = String.format("The host %s doesn't support conversion of instance %s from VMware to KVM due to: %s", - convertHost.getName(), sourceVM, checkConvertInstanceAnswer.getDetails()); + convertHost, sourceVM, checkConvertInstanceAnswer.getDetails()); logger.error(err); throw new CloudRuntimeException(err); } @@ -1956,8 +1956,8 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { ServiceOfferingVO serviceOffering, Map dataDiskOfferingMap, DataStoreTO temporaryConvertLocation, String ovfTemplateDirConvertLocation ) { - logger.debug(String.format("Delegating the conversion of instance %s from VMware to KVM to the host %s (%s) using OVF %s on conversion datastore", - sourceVM, convertHost.getId(), convertHost.getName(), ovfTemplateDirConvertLocation)); + logger.debug("Delegating the conversion of instance {} from VMware to KVM to the host {} using OVF {} on conversion datastore", + sourceVM, convertHost, ovfTemplateDirConvertLocation); RemoteInstanceTO remoteInstanceTO = new RemoteInstanceTO(sourceVM); List destinationStoragePools = 
selectInstanceConversionStoragePools(convertStoragePools, sourceVMwareInstance.getDisks(), serviceOffering, dataDiskOfferingMap); @@ -1970,15 +1970,15 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { try { convertAnswer = agentManager.send(convertHost.getId(), cmd); } catch (AgentUnavailableException | OperationTimedoutException e) { - String err = String.format("Could not send the convert instance command to host %s (%s) due to: %s", - convertHost.getId(), convertHost.getName(), e.getMessage()); + String err = String.format("Could not send the convert instance command to host %s due to: %s", + convertHost, e.getMessage()); logger.error(err, e); throw new CloudRuntimeException(err); } if (!convertAnswer.getResult()) { String err = String.format("The convert process failed for instance %s from VMware to KVM on host %s: %s", - sourceVM, convertHost.getName(), convertAnswer.getDetails()); + sourceVM, convertHost, convertAnswer.getDetails()); logger.error(err); throw new CloudRuntimeException(err); } @@ -1992,8 +1992,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { DataStoreTO temporaryConvertLocation, String vcenterHost, String vcenterUsername, String vcenterPassword, String datacenterName ) { - logger.debug(String.format("Delegating the conversion of instance %s from VMware to KVM to the host %s (%s) after OVF export through ovftool", - sourceVM, convertHost.getId(), convertHost.getName())); + logger.debug("Delegating the conversion of instance {} from VMware to KVM to the host {} after OVF export through ovftool", sourceVM, convertHost); RemoteInstanceTO remoteInstanceTO = new RemoteInstanceTO(sourceVMwareInstance.getName(), vcenterHost, vcenterUsername, vcenterPassword, datacenterName); List destinationStoragePools = selectInstanceConversionStoragePools(convertStoragePools, sourceVMwareInstance.getDisks(), serviceOffering, dataDiskOfferingMap); @@ -2021,15 +2020,15 @@ public class UnmanagedVMsManagerImpl implements 
UnmanagedVMsManager { try { convertAnswer = agentManager.send(convertHost.getId(), convertInstanceCommand); } catch (AgentUnavailableException | OperationTimedoutException e) { - String err = String.format("Could not send the convert instance command to host %s (%s) due to: %s", - convertHost.getId(), convertHost.getName(), e.getMessage()); + String err = String.format("Could not send the convert instance command to host %s due to: %s", + convertHost, e.getMessage()); logger.error(err, e); throw new CloudRuntimeException(err); } if (!convertAnswer.getResult()) { String err = String.format("The convert process failed for instance %s from VMware to KVM on host %s: %s", - sourceVM, convertHost.getName(), convertAnswer.getDetails()); + sourceVM, convertHost, convertAnswer.getDetails()); logger.error(err); throw new CloudRuntimeException(err); } @@ -2042,8 +2041,8 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { importAnswer = agentManager.send(importHost.getId(), importCmd); } catch (AgentUnavailableException | OperationTimedoutException e) { String err = String.format( - "Could not send the import converted instance command to host %d (%s) due to: %s", - importHost.getId(), importHost.getName(), e.getMessage()); + "Could not send the import converted instance command to host %s due to: %s", + importHost, e.getMessage()); logger.error(err, e); throw new CloudRuntimeException(err); } @@ -2051,7 +2050,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { if (!importAnswer.getResult()) { String err = String.format( "The import process failed for instance %s from VMware to KVM on host %s: %s", - sourceVM, importHost.getName(), importAnswer.getDetails()); + sourceVM, importHost, importAnswer.getDetails()); logger.error(err); throw new CloudRuntimeException(err); } @@ -2085,7 +2084,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { pools = getPoolsWithMatchingTags(pools, diskOfferingTags); if 
(pools.isEmpty()) { - String msg = String.format("Cannot find suitable storage pools in cluster %s for the conversion", destinationCluster.getName()); + String msg = String.format("Cannot find suitable storage pools in cluster %s for the conversion", destinationCluster); logger.error(msg); throw new CloudRuntimeException(msg); } @@ -2301,7 +2300,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { String instanceName = vmVO.getInstanceName(); if (!existsVMToUnmanage(instanceName, hostId)) { - throw new CloudRuntimeException("VM with id = " + vmVO.getUuid() + " is not found in the hypervisor"); + throw new CloudRuntimeException(String.format("VM %s is not found in the hypervisor", vmVO)); } return userVmManager.unmanageUserVM(vmId); @@ -2315,11 +2314,11 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { command.setInstanceName(instanceName); Answer ans = agentManager.easySend(hostId, command); if (!(ans instanceof PrepareUnmanageVMInstanceAnswer)) { - throw new CloudRuntimeException("Error communicating with host " + hostId); + throw new CloudRuntimeException(String.format("Error communicating with host %s", hostDao.findById(hostId))); } PrepareUnmanageVMInstanceAnswer answer = (PrepareUnmanageVMInstanceAnswer) ans; if (!answer.getResult()) { - logger.error("Error verifying VM " + instanceName + " exists on host with ID = " + hostId + ": " + answer.getDetails()); + logger.error("Error verifying VM {} exists on host {}: {}", instanceName::toString, () -> hostDao.findById(hostId), answer::getDetails); } return answer.getResult(); } @@ -2372,7 +2371,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { try { resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.user_vm, 1); } catch (ResourceAllocationException e) { - logger.error(String.format("VM resource allocation error for account: %s", owner.getUuid()), e); + logger.error("VM resource allocation error for account: {}", owner, e); throw 
new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM resource allocation error for account: %s. %s", owner.getUuid(), StringUtils.defaultString(e.getMessage()))); } String displayName = cmd.getDisplayName(); @@ -2560,7 +2559,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { try { dest = deploymentPlanningManager.planDeployment(profile, plan, excludeList, null); } catch (Exception e) { - logger.warn(String.format("Import failed for Vm: %s while finding deployment destination", userVm.getInstanceName()), e); + logger.warn("Import failed for Vm: {} while finding deployment destination", userVm, e); cleanupFailedImportVM(userVm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Import failed for Vm: %s while finding deployment destination", userVm.getInstanceName())); } @@ -2691,7 +2690,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { try { dest = deploymentPlanningManager.planDeployment(profile, plan, excludeList, null); } catch (Exception e) { - logger.warn(String.format("Import failed for Vm: %s while finding deployment destination", userVm.getInstanceName()), e); + logger.warn("Import failed for Vm: {} while finding deployment destination", userVm, e); cleanupFailedImportVM(userVm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Import failed for Vm: %s while finding deployment destination", userVm.getInstanceName())); } @@ -2785,7 +2784,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { throw new InvalidParameterValueException("Unable to find physical network with id: " + physicalNetworkId + " and tag: " + requiredOfferings.get(0).getTags()); } - logger.debug("Creating network for account " + owner + " from the network offering id=" + requiredOfferings.get(0).getId() + " as a part of deployVM process"); + logger.debug("Creating network for account {} from the network offering {} as a part of deployVM process", owner, 
requiredOfferings.get(0)); Network newNetwork = networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), owner.getAccountName() + "-network", owner.getAccountName() + "-network", null, null, null, false, null, owner, null, physicalNetwork, zone.getId(), ControlledEntity.ACLType.Account, null, null, null, null, true, null, null, null, null, null, null, null, null, null, null, null); diff --git a/server/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleManagerImpl.java b/server/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleManagerImpl.java index 2898fd5d0f3..0a8f3b99a09 100644 --- a/server/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleManagerImpl.java @@ -115,7 +115,7 @@ public class VMScheduleManagerImpl extends MutualExclusiveIdsManagerBase impleme description = String.format("%s - %s", action, DateUtil.getHumanReadableSchedule(cronExpression)); } else description = cmd.getDescription(); - logger.warn(String.format("Using timezone [%s] for running the schedule for VM [%s], as an equivalent of [%s].", timeZoneId, vm.getUuid(), cmdTimeZone)); + logger.warn("Using timezone [{}] for running the schedule for VM [{}], as an equivalent of [{}].", timeZoneId, vm, cmdTimeZone); String finalDescription = description; VMSchedule.Action finalAction = action; @@ -212,8 +212,8 @@ public class VMScheduleManagerImpl extends MutualExclusiveIdsManagerBase impleme timeZone = TimeZone.getTimeZone(cmdTimeZone); timeZoneId = timeZone.getID(); if (!timeZoneId.equals(cmdTimeZone)) { - logger.warn(String.format("Using timezone [%s] for running the schedule [%s] for VM %s, as an equivalent of [%s].", - timeZoneId, vmSchedule.getSchedule(), vmSchedule.getVmId(), cmdTimeZone)); + logger.warn("Using timezone [{}] for running the schedule [{}] for VM {}, as an equivalent of [{}].", + timeZoneId, vmSchedule.getSchedule(), userVmManager.getUserVm(vmSchedule.getVmId()), 
cmdTimeZone); } vmSchedule.setTimeZone(timeZoneId); } else { diff --git a/server/src/main/java/org/apache/cloudstack/vm/schedule/VMSchedulerImpl.java b/server/src/main/java/org/apache/cloudstack/vm/schedule/VMSchedulerImpl.java index 139a4d0be1f..7410fb1c265 100644 --- a/server/src/main/java/org/apache/cloudstack/vm/schedule/VMSchedulerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/vm/schedule/VMSchedulerImpl.java @@ -112,7 +112,8 @@ public class VMSchedulerImpl extends ManagerBase implements VMScheduler, Configu @Override public Date scheduleNextJob(VMScheduleVO vmSchedule, Date timestamp) { if (!vmSchedule.getEnabled()) { - logger.debug(String.format("VM Schedule [id=%s] for VM [id=%s] is disabled. Not scheduling next job.", vmSchedule.getUuid(), vmSchedule.getVmId())); + logger.debug("VM Schedule {} for VM {} with id {} is disabled. Not scheduling next job.", + vmSchedule::toString, () -> userVmManager.getUserVm(vmSchedule.getVmId()), vmSchedule::getVmId); return null; } @@ -122,7 +123,7 @@ public class VMSchedulerImpl extends ManagerBase implements VMScheduler, Configu VirtualMachine vm = userVmManager.getUserVm(vmSchedule.getVmId()); if (vm == null) { - logger.info(String.format("VM [id=%s] is removed. Disabling VM schedule [id=%s].", vmSchedule.getVmId(), vmSchedule.getUuid())); + logger.info("VM id={} is removed. Disabling VM schedule {}.", vmSchedule.getVmId(), vmSchedule); vmSchedule.setEnabled(false); vmScheduleDao.persist(vmSchedule); return null; @@ -140,7 +141,7 @@ public class VMSchedulerImpl extends ManagerBase implements VMScheduler, Configu zonedEndDate = ZonedDateTime.ofInstant(endDate.toInstant(), vmSchedule.getTimeZoneId()); } if (zonedEndDate != null && now.isAfter(zonedEndDate)) { - logger.info(String.format("End time is less than current time. Disabling VM schedule [id=%s] for VM [id=%s].", vmSchedule.getUuid(), vmSchedule.getVmId())); + logger.info("End time is less than current time. 
Disabling VM schedule {} for VM {}.", vmSchedule, vm); vmSchedule.setEnabled(false); vmScheduleDao.persist(vmSchedule); return null; @@ -154,7 +155,7 @@ public class VMSchedulerImpl extends ManagerBase implements VMScheduler, Configu } if (ts == null) { - logger.info(String.format("No next schedule found. Disabling VM schedule [id=%s] for VM [id=%s].", vmSchedule.getUuid(), vmSchedule.getVmId())); + logger.info("No next schedule found. Disabling VM schedule {} for VM {}.", vmSchedule, vm); vmSchedule.setEnabled(false); vmScheduleDao.persist(vmSchedule); return null; @@ -165,7 +166,7 @@ public class VMSchedulerImpl extends ManagerBase implements VMScheduler, Configu try { vmScheduledJobDao.persist(scheduledJob); ActionEventUtils.onScheduledActionEvent(User.UID_SYSTEM, vm.getAccountId(), actionEventMap.get(vmSchedule.getAction()), - String.format("Scheduled action (%s) [vmId: %s scheduleId: %s] at %s", vmSchedule.getAction(), vm.getUuid(), vmSchedule.getUuid(), scheduledDateTime), + String.format("Scheduled action (%s) [vm: %s, schedule: %s] at %s", vmSchedule.getAction(), vm, vmSchedule, scheduledDateTime), vm.getId(), ApiCommandResourceType.VirtualMachine.toString(), true, 0); } catch (EntityExistsException exception) { logger.debug("Job is already scheduled."); @@ -246,7 +247,7 @@ public class VMSchedulerImpl extends ManagerBase implements VMScheduler, Configu try { scheduleNextJob(schedule, timestamp); } catch (Exception e) { - logger.warn("Error in scheduling next job for schedule " + schedule.getUuid(), e); + logger.warn("Error in scheduling next job for schedule {}", schedule, e); } } } @@ -272,7 +273,8 @@ public class VMSchedulerImpl extends ManagerBase implements VMScheduler, Configu if (logger.isDebugEnabled()) { final Date scheduledTimestamp = vmScheduledJob.getScheduledTime(); displayTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, scheduledTimestamp); - logger.debug(String.format("Executing %s for VM id %d for schedule id: %d at %s", 
vmScheduledJob.getAction(), vmScheduledJob.getVmId(), vmScheduledJob.getVmScheduleId(), displayTime)); + logger.debug("Executing {} for VM {} for scheduled job: {} at {}", + vmScheduledJob.getAction(), vm, vmScheduledJob, displayTime); } tmpVMScheduleJob = vmScheduledJobDao.acquireInLockTable(vmScheduledJob.getId()); @@ -282,7 +284,7 @@ public class VMSchedulerImpl extends ManagerBase implements VMScheduler, Configu vmScheduledJobDao.update(vmScheduledJob.getId(), tmpVMScheduleJob); } } catch (final Exception e) { - logger.warn(String.format("Executing scheduled job id: %s failed due to %s", vmScheduledJob.getId(), e)); + logger.warn("Executing scheduled job {} failed due to {}", vmScheduledJob, e); } finally { if (tmpVMScheduleJob != null) { vmScheduledJobDao.releaseFromLockTable(vmScheduledJob.getId()); @@ -293,13 +295,14 @@ public class VMSchedulerImpl extends ManagerBase implements VMScheduler, Configu Long processJob(VMScheduledJob vmScheduledJob, VirtualMachine vm) { if (!Arrays.asList(VirtualMachine.State.Running, VirtualMachine.State.Stopped).contains(vm.getState())) { - logger.info(String.format("Skipping action (%s) for [vmId:%s scheduleId: %s] because VM is invalid state: %s", vmScheduledJob.getAction(), vm.getUuid(), vmScheduledJob.getVmScheduleId(), vm.getState())); + logger.info("Skipping action ({}) for [vm: {}, scheduled job: {}] because VM is invalid state: {}", + vmScheduledJob.getAction(), vm, vmScheduledJob, vm.getState()); return null; } final Long eventId = ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, vm.getAccountId(), null, actionEventMap.get(vmScheduledJob.getAction()), true, - String.format("Executing action (%s) for VM Id:%s", vmScheduledJob.getAction(), vm.getUuid()), + String.format("Executing action (%s) for VM: %s", vmScheduledJob.getAction(), vm), vm.getId(), ApiCommandResourceType.VirtualMachine.toString(), 0); if (vm.getState() == VirtualMachine.State.Running) { @@ -317,8 +320,8 @@ public class VMSchedulerImpl extends 
ManagerBase implements VMScheduler, Configu return executeStartVMJob(vm, eventId); } - logger.warn(String.format("Skipping action (%s) for [vmId:%s scheduleId: %s] because VM is in state: %s", - vmScheduledJob.getAction(), vm.getUuid(), vmScheduledJob.getVmScheduleId(), vm.getState())); + logger.warn("Skipping action ({}) for [vm: {}, scheduled job: {}] because VM is in state: {}", + vmScheduledJob.getAction(), vm, vmScheduledJob, vm.getState()); return null; } @@ -329,7 +332,8 @@ public class VMSchedulerImpl extends ManagerBase implements VMScheduler, Configu VirtualMachine vm = userVmManager.getUserVm(vmId); for (final VMScheduledJob skippedVmScheduledJobVO : skippedVmScheduledJobVOS) { VMScheduledJob scheduledJob = jobsToExecute.get(vmId); - logger.info(String.format("Skipping scheduled job [id: %s, vmId: %s] because of conflict with another scheduled job [id: %s]", skippedVmScheduledJobVO.getUuid(), vm.getUuid(), scheduledJob.getUuid())); + logger.info("Skipping scheduled job {} for vm {} because of conflict with another scheduled job {}", + skippedVmScheduledJobVO, vm, scheduledJob); } } } diff --git a/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java b/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java index ba0d3cab002..e04c5e181e7 100644 --- a/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java +++ b/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java @@ -17,6 +17,12 @@ package com.cloud.alert; import com.cloud.alert.dao.AlertDao; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.HostPodVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.HostPodDao; import org.apache.cloudstack.utils.mailing.SMTPMailSender; import org.apache.logging.log4j.Logger; import org.junit.Assert; @@ -41,6 +47,15 @@ public class AlertManagerImplTest { @Mock AlertDao alertDaoMock; + @Mock + private DataCenterDao _dcDao; + + @Mock + private 
HostPodDao _podDao; + + @Mock + private ClusterDao _clusterDao; + @Mock AlertVO alertVOMock; @@ -52,6 +67,16 @@ public class AlertManagerImplTest { private void sendMessage (){ try { + DataCenterVO zone = Mockito.mock(DataCenterVO.class); + Mockito.when(zone.getId()).thenReturn(0L); + Mockito.when(_dcDao.findById(0L)).thenReturn(zone); + HostPodVO pod = Mockito.mock(HostPodVO.class); + Mockito.when(pod.getId()).thenReturn(1L); + Mockito.when(_podDao.findById(1L)).thenReturn(pod); + ClusterVO cluster = Mockito.mock(ClusterVO.class); + Mockito.when(cluster.getId()).thenReturn(1L); + Mockito.when(_clusterDao.findById(1L)).thenReturn(cluster); + alertManagerImplMock.sendAlert(AlertManager.AlertType.ALERT_TYPE_CPU, 0, 1l, 1l, "", ""); } catch (UnsupportedEncodingException | MessagingException e) { Assert.fail(); diff --git a/server/src/test/java/com/cloud/capacity/CapacityManagerTest.java b/server/src/test/java/com/cloud/capacity/CapacityManagerTest.java index 2c7a2a7fb52..c9f0993213c 100644 --- a/server/src/test/java/com/cloud/capacity/CapacityManagerTest.java +++ b/server/src/test/java/com/cloud/capacity/CapacityManagerTest.java @@ -20,6 +20,7 @@ package com.cloud.capacity; import com.cloud.capacity.dao.CapacityDao; import com.cloud.dc.ClusterDetailsDao; import com.cloud.dc.ClusterDetailsVO; +import com.cloud.host.Host; import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.vm.VirtualMachine; @@ -37,6 +38,7 @@ public class CapacityManagerTest { ServiceOfferingDao SOfferingDao = mock(ServiceOfferingDao.class); ClusterDetailsDao ClusterDetailsDao = mock(com.cloud.dc.ClusterDetailsDao.class); CapacityManagerImpl capMgr; + private Host host = mock(Host.class); private ServiceOfferingVO svo = mock(ServiceOfferingVO.class); private CapacityVO cvoCpu = mock(CapacityVO.class); private CapacityVO cvoRam = mock(CapacityVO.class); @@ -71,7 +73,7 @@ public class CapacityManagerTest { 
when(clusterDetailRam.getValue()).thenReturn("1.5"); when(clusterDetailCpu.getValue()).thenReturn("2"); when(CDao.update(anyLong(), isA(CapacityVO.class))).thenReturn(true); - boolean hasCapacity = capMgr.checkIfHostHasCapacity(1l, 500, 1024 * 1024 * 1024, false, 2, 2, false); + boolean hasCapacity = capMgr.checkIfHostHasCapacity(host, 500, 1024 * 1024 * 1024, false, 2, 2, false); Assert.assertTrue(hasCapacity); } diff --git a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java index 4ae871e1ba5..ceffe019377 100644 --- a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java +++ b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java @@ -492,7 +492,7 @@ public class ConfigurationManagerTest { when(configurationMgr._firewallDao.countRulesByIpId(anyLong())).thenReturn(0L); - when(configurationMgr._ipAddrMgr.disassociatePublicIpAddress(anyLong(), anyLong(), any(Account.class))).thenReturn(true); + when(configurationMgr._ipAddrMgr.disassociatePublicIpAddress(any(), anyLong(), any(Account.class))).thenReturn(true); when(configurationMgr._vlanDao.releaseFromLockTable(anyLong())).thenReturn(true); diff --git a/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java b/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java index 482d17908f4..58bc8509768 100644 --- a/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java +++ b/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java @@ -891,7 +891,7 @@ public class DeploymentPlanningManagerImplTest { Mockito.when(capacityMgr.checkIfHostReachMaxGuestLimit(host)).thenReturn(false); Mockito.when(capacityMgr.checkIfHostHasCpuCapability(ArgumentMatchers.anyLong(), ArgumentMatchers.anyInt(), ArgumentMatchers.anyInt())).thenReturn(true); Mockito.when(capacityMgr.checkIfHostHasCapacity( - 
ArgumentMatchers.anyLong(), + ArgumentMatchers.any(), ArgumentMatchers.anyInt(), ArgumentMatchers.anyLong(), ArgumentMatchers.anyBoolean(), @@ -902,7 +902,7 @@ public class DeploymentPlanningManagerImplTest { Mockito.when(serviceOfferingDetailsDao.findDetail(vmProfile.getServiceOfferingId(), GPU.Keys.vgpuType.toString())).thenReturn(null); Mockito.doReturn(true).when(_dpm).checkVmProfileAndHost(vmProfile, host); - Mockito.doReturn(true).when(_dpm).checkIfHostFitsPlannerUsage(ArgumentMatchers.anyLong(), ArgumentMatchers.nullable(PlannerResourceUsage.class)); + Mockito.doReturn(true).when(_dpm).checkIfHostFitsPlannerUsage(ArgumentMatchers.any(Host.class), ArgumentMatchers.nullable(PlannerResourceUsage.class)); Mockito.when(clusterDetailsDao.findDetail(ArgumentMatchers.anyLong(), ArgumentMatchers.anyString())).thenReturn(new ClusterDetailsVO(clusterId, "mock", "1")); DeploymentClusterPlanner planner = Mockito.spy(new FirstFitPlanner()); diff --git a/server/src/test/java/com/cloud/ha/HighAvailabilityManagerImplTest.java b/server/src/test/java/com/cloud/ha/HighAvailabilityManagerImplTest.java index 27b6e1b1b4c..53ae5d2279e 100644 --- a/server/src/test/java/com/cloud/ha/HighAvailabilityManagerImplTest.java +++ b/server/src/test/java/com/cloud/ha/HighAvailabilityManagerImplTest.java @@ -218,13 +218,11 @@ public class HighAvailabilityManagerImplTest { List vms = new ArrayList(); VMInstanceVO vm1 = Mockito.mock(VMInstanceVO.class); Mockito.lenient().when(vm1.getHostId()).thenReturn(1l); - //Mockito.when(vm1.getInstanceName()).thenReturn("i-2-3-VM"); Mockito.when(vm1.getType()).thenReturn(VirtualMachine.Type.User); Mockito.when(vm1.isHaEnabled()).thenReturn(true); vms.add(vm1); VMInstanceVO vm2 = Mockito.mock(VMInstanceVO.class); Mockito.when(vm2.getHostId()).thenReturn(1l); - //Mockito.when(vm2.getInstanceName()).thenReturn("r-2-VM"); Mockito.when(vm2.getType()).thenReturn(VirtualMachine.Type.DomainRouter); Mockito.when(vm2.isHaEnabled()).thenReturn(true); 
Mockito.when(vm2.getDataCenterId()).thenReturn(1L); diff --git a/server/src/test/java/com/cloud/network/Ipv6ServiceImplTest.java b/server/src/test/java/com/cloud/network/Ipv6ServiceImplTest.java index 02ddd0c983e..b59eeaa4624 100644 --- a/server/src/test/java/com/cloud/network/Ipv6ServiceImplTest.java +++ b/server/src/test/java/com/cloud/network/Ipv6ServiceImplTest.java @@ -22,6 +22,7 @@ import com.cloud.dc.DataCenterGuestIpv6PrefixVO; import com.cloud.dc.DataCenterVO; import com.cloud.dc.Vlan; import com.cloud.dc.VlanVO; +import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.DataCenterGuestIpv6PrefixDao; import com.cloud.dc.dao.VlanDao; import com.cloud.event.ActionEventUtils; @@ -36,6 +37,7 @@ import com.cloud.network.dao.IPAddressVO; import com.cloud.network.dao.Ipv6GuestPrefixSubnetNetworkMapDao; import com.cloud.network.dao.NetworkDetailsDao; import com.cloud.network.dao.NetworkVO; +import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.firewall.FirewallService; import com.cloud.network.guru.PublicNetworkGuru; import com.cloud.network.rules.FirewallManager; @@ -119,6 +121,10 @@ public class Ipv6ServiceImplTest { @Mock IPAddressDao ipAddressDao; @Mock + DataCenterDao zoneDao; + @Mock + PhysicalNetworkDao physicalNetworkDao; + @Mock NetworkOrchestrationService networkOrchestrationService; FirewallManager firewallManager = Mockito.mock(FirewallManager.class); @@ -229,12 +235,13 @@ public class Ipv6ServiceImplTest { @Test(expected = ResourceAllocationException.class) @DB public void testNoPrefixesPreAllocateIpv6SubnetForNetwork() throws ResourceAllocationException, MalformedObjectNameException, NotCompliantMBeanException, InstanceAlreadyExistsException, MBeanRegistrationException { - final long zoneId = 1L; + DataCenterVO zone = Mockito.mock(DataCenterVO.class); + Mockito.when(zone.getId()).thenReturn(1L); final List prefixes = new ArrayList<>(); - 
Mockito.when(dataCenterGuestIpv6PrefixDao.listByDataCenterId(zoneId)).thenReturn(prefixes); + Mockito.when(dataCenterGuestIpv6PrefixDao.listByDataCenterId(zone.getId())).thenReturn(prefixes); TransactionLegacy txn = TransactionLegacy.open("testNoPrefixesPreAllocateIpv6SubnetForNetwork"); try { - ipv6Service.preAllocateIpv6SubnetForNetwork(zoneId); + ipv6Service.preAllocateIpv6SubnetForNetwork(zone); } finally { txn.close("testNoPrefixesPreAllocateIpv6SubnetForNetwork"); } @@ -243,17 +250,18 @@ public class Ipv6ServiceImplTest { @Test @DB public void testExistingPreAllocateIpv6SubnetForNetwork() { - final long zoneId = 1L; + DataCenterVO zone = Mockito.mock(DataCenterVO.class); + Mockito.when(zone.getId()).thenReturn(1L); final List prefixes = new ArrayList<>(); DataCenterGuestIpv6PrefixVO prefix = prepareMocksForIpv6Subnet(); prefixes.add(prefix); Ipv6GuestPrefixSubnetNetworkMapVO ipv6GuestPrefixSubnetNetworkMap = new Ipv6GuestPrefixSubnetNetworkMapVO(1L, "fd17:5:8a43:e2a4::/64", null, Ipv6GuestPrefixSubnetNetworkMap.State.Free); - Mockito.when(dataCenterGuestIpv6PrefixDao.listByDataCenterId(zoneId)).thenReturn(prefixes); + Mockito.when(dataCenterGuestIpv6PrefixDao.listByDataCenterId(zone.getId())).thenReturn(prefixes); Mockito.when(ipv6GuestPrefixSubnetNetworkMapDao.findFirstAvailable(prefix.getId())).thenReturn(ipv6GuestPrefixSubnetNetworkMap); updatedPrefixSubnetMap.clear(); try (TransactionLegacy txn = TransactionLegacy.open("testNoPrefixesPreAllocateIpv6SubnetForNetwork")) { try { - ipv6Service.preAllocateIpv6SubnetForNetwork(zoneId); + ipv6Service.preAllocateIpv6SubnetForNetwork(zone); } catch (ResourceAllocationException e) { Assert.fail("ResourceAllocationException"); } @@ -269,7 +277,8 @@ public class Ipv6ServiceImplTest { @Test @DB public void testNewPreAllocateIpv6SubnetForNetwork() { - final long zoneId = 1L; + DataCenterVO zone = Mockito.mock(DataCenterVO.class); + Mockito.when(zone.getId()).thenReturn(1L); final List prefixes = new ArrayList<>(); 
DataCenterGuestIpv6PrefixVO prefix = prepareMocksForIpv6Subnet(); final IPv6Network ip6Prefix = IPv6Network.fromString(prefix.getPrefix()); @@ -279,14 +288,14 @@ public class Ipv6ServiceImplTest { subnets.add(splits.next().toString()); } prefixes.add(prefix); - Mockito.when(dataCenterGuestIpv6PrefixDao.listByDataCenterId(zoneId)).thenReturn(prefixes); + Mockito.when(dataCenterGuestIpv6PrefixDao.listByDataCenterId(zone.getId())).thenReturn(prefixes); Mockito.when(ipv6GuestPrefixSubnetNetworkMapDao.findFirstAvailable(prefix.getId())).thenReturn(null); Mockito.when(ipv6GuestPrefixSubnetNetworkMapDao.listUsedByPrefix(prefix.getId())).thenReturn(new ArrayList<>()); persistedPrefixSubnetMap.clear(); // No subnet is used from the prefix, should allocate any subnet try (TransactionLegacy txn = TransactionLegacy.open("testNewPreAllocateIpv6SubnetForNetwork")) { try { - ipv6Service.preAllocateIpv6SubnetForNetwork(zoneId); + ipv6Service.preAllocateIpv6SubnetForNetwork(zone); } catch (ResourceAllocationException e) { Assert.fail("ResourceAllocationException"); } @@ -306,7 +315,7 @@ public class Ipv6ServiceImplTest { // All subnets from the prefix are already in use, should return ResourceAllocationException try (TransactionLegacy txn = TransactionLegacy.open("testNewPreAllocateIpv6SubnetForNetwork")) { try { - ipv6Service.preAllocateIpv6SubnetForNetwork(zoneId); + ipv6Service.preAllocateIpv6SubnetForNetwork(zone); Assert.fail("ResourceAllocationException expected but not returned"); } catch (ResourceAllocationException ignored) {} } @@ -316,7 +325,7 @@ public class Ipv6ServiceImplTest { Ipv6GuestPrefixSubnetNetworkMapVO poppedUsedSubnetMap = usedSubnets.remove(2); try (TransactionLegacy txn = TransactionLegacy.open("testNewPreAllocateIpv6SubnetForNetwork")) { try { - ipv6Service.preAllocateIpv6SubnetForNetwork(zoneId); + ipv6Service.preAllocateIpv6SubnetForNetwork(zone); } catch (ResourceAllocationException e) { Assert.fail("ResourceAllocationException"); } @@ -408,6 +417,9 @@ 
public class Ipv6ServiceImplTest { Nic nic = Mockito.mock(Nic.class); Mockito.when(nic.getIPv6Address()).thenReturn(null); Mockito.when(nic.getBroadcastUri()).thenReturn(URI.create(vlan)); + DataCenterVO zoneMock = Mockito.mock(DataCenterVO.class); + Mockito.when(zoneDao.findById(Mockito.anyLong())).thenReturn(zoneMock); + Mockito.when(zoneMock.getUuid()).thenReturn("uuid"); try (TransactionLegacy txn = TransactionLegacy.open("testNewErrorAssignPublicIpv6ToNetwork")) { ipv6Service.assignPublicIpv6ToNetwork(Mockito.mock(Network.class), nic); } @@ -611,6 +623,9 @@ public class Ipv6ServiceImplTest { Mockito.when(vlanVO.getVlanTag()).thenReturn(vlan); Mockito.when(vlanDao.findById(Mockito.anyLong())).thenReturn(vlanVO); Mockito.when(vlanDao.listIpv6RangeByZoneIdAndVlanId(Mockito.anyLong(), Mockito.anyString())).thenReturn(new ArrayList<>()); + DataCenterVO zoneMock = Mockito.mock(DataCenterVO.class); + Mockito.when(zoneDao.findById(zoneId)).thenReturn(zoneMock); + Mockito.when(zoneMock.getUuid()).thenReturn("uuid"); try { ipv6Service.checkNetworkIpv6Upgrade(network); Assert.fail("No InsufficientAddressCapacityException"); diff --git a/server/src/test/java/com/cloud/network/MockFirewallManagerImpl.java b/server/src/test/java/com/cloud/network/MockFirewallManagerImpl.java index cfdb857b5bf..73437293933 100644 --- a/server/src/test/java/com/cloud/network/MockFirewallManagerImpl.java +++ b/server/src/test/java/com/cloud/network/MockFirewallManagerImpl.java @@ -132,7 +132,7 @@ public class MockFirewallManagerImpl extends ManagerBase implements FirewallMana } @Override - public boolean revokeFirewallRulesForIp(long ipId, long userId, Account caller) throws ResourceUnavailableException { + public boolean revokeFirewallRulesForIp(IpAddress ip, long userId, Account caller) throws ResourceUnavailableException { // TODO Auto-generated method stub return false; } @@ -145,7 +145,7 @@ public class MockFirewallManagerImpl extends ManagerBase implements FirewallMana } @Override - 
public boolean revokeAllFirewallRulesForNetwork(long networkId, long userId, Account caller) throws ResourceUnavailableException { + public boolean revokeAllFirewallRulesForNetwork(Network network, long userId, Account caller) throws ResourceUnavailableException { // TODO Auto-generated method stub return false; } diff --git a/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java b/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java index 51d5a61cbca..aea29d59356 100644 --- a/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java +++ b/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java @@ -577,7 +577,7 @@ public class NetworkServiceImplTest { if(ipv6 && Network.GuestType.Isolated.equals(guestType)) { Mockito.when(networkOfferingDao.isIpv6Supported(networkOfferingId)).thenReturn(true); try { - Mockito.when(ipv6Service.preAllocateIpv6SubnetForNetwork(Mockito.anyLong())).thenReturn(new Pair<>(IP6_GATEWAY, IP6_CIDR)); + Mockito.when(ipv6Service.preAllocateIpv6SubnetForNetwork(Mockito.any())).thenReturn(new Pair<>(IP6_GATEWAY, IP6_CIDR)); } catch (ResourceAllocationException e) { Assert.fail(String.format("failure with exception: %s", e.getMessage())); } @@ -1175,7 +1175,7 @@ public class NetworkServiceImplTest { when(networkVO.getId()).thenReturn(networkId); when(networkVO.getGuestType()).thenReturn(Network.GuestType.Isolated); try { - when(ipAddressManagerMock.allocateIp(any(), anyBoolean(), any(), anyLong(), any(), any(), eq(srcNatIp))).thenReturn(ipAddress); + when(ipAddressManagerMock.allocateIp(any(), anyBoolean(), any(), any(), any(), any(), eq(srcNatIp))).thenReturn(ipAddress); service.checkAndSetRouterSourceNatIp(account, createNetworkCmd, networkVO); } catch (InsufficientAddressCapacityException | ResourceAllocationException e) { Assert.fail(e.getMessage()); diff --git a/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java 
b/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java index b391aeb9f07..4c5531277fe 100644 --- a/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java +++ b/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java @@ -86,6 +86,7 @@ import com.cloud.user.User; import com.cloud.user.UserVO; import com.cloud.user.dao.SSHKeyPairDao; import com.cloud.user.dao.UserDao; +import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.db.EntityManager; @@ -100,6 +101,7 @@ import com.cloud.vm.UserVmService; import com.cloud.vm.UserVmVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineManager; +import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.VmStats; import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.UserVmDao; @@ -265,6 +267,7 @@ public class AutoScaleManagerImplTest { final static String INVALID = "invalid"; private static final Long counterId = 1L; + private static final String counterUuid = "1111-1111-1100"; private static final String counterName = "counter name"; private static final Counter.Source counterSource = Counter.Source.CPU; private static final String counterValue = "counter value"; @@ -397,7 +400,7 @@ public class AutoScaleManagerImplTest { public void setUp() { account = new AccountVO("testaccount", 1L, "networkdomain", Account.Type.NORMAL, "uuid"); - account.setId(2L); + account.setId(5L); user = new UserVO(1, "testuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); CallContext.register(user, account); @@ -1202,7 +1205,7 @@ public class AutoScaleManagerImplTest { when(autoScaleVmGroupVmMapDao.removeByGroup(vmGroupId)).thenReturn(true); when(asGroupStatisticsDao.removeByGroupId(vmGroupId)).thenReturn(true); - boolean result = autoScaleManagerImplSpy.deleteAutoScaleVmGroupsByAccount(accountId); + boolean result = 
autoScaleManagerImplSpy.deleteAutoScaleVmGroupsByAccount(account); Assert.assertTrue(result); @@ -1218,7 +1221,7 @@ public class AutoScaleManagerImplTest { when(asPolicyDao.removeByAccountId(accountId)).thenReturn(2); when(conditionDao.removeByAccountId(accountId)).thenReturn(3); - autoScaleManagerImplSpy.cleanUpAutoScaleResources(accountId); + autoScaleManagerImplSpy.cleanUpAutoScaleResources(account); Mockito.verify(autoScaleVmProfileDao).removeByAccountId(accountId); Mockito.verify(asPolicyDao).removeByAccountId(accountId); @@ -1265,15 +1268,14 @@ public class AutoScaleManagerImplTest { Mockito.doReturn(networkMock).when(autoScaleManagerImplSpy).getNetwork(loadBalancerId); when(networkMock.getId()).thenReturn(networkId); - when(userVmMock.getId()).thenReturn(virtualMachineId); when(zoneMock.getNetworkType()).thenReturn(DataCenter.NetworkType.Basic); when(userVmService.createBasicSecurityGroupVirtualMachine(any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), eq(userData), eq(userDataId), eq(userDataDetails.toString()), any(), any(), any(), eq(true), any(), any(), any(), any(), any(), any(), any(), eq(true), any())).thenReturn(userVmMock); - long result = autoScaleManagerImplSpy.createNewVM(asVmGroupMock); + UserVm result = autoScaleManagerImplSpy.createNewVM(asVmGroupMock); - Assert.assertEquals((long) virtualMachineId, result); + Assert.assertEquals(userVmMock, result); String vmHostNamePattern = autoScaleManagerImplSpy.VM_HOSTNAME_PREFIX + vmGroupName + "-" + asVmGroupMock.getNextVmSeq() + "-[a-z]{6}"; @@ -1313,7 +1315,6 @@ public class AutoScaleManagerImplTest { Mockito.doReturn(networkMock).when(autoScaleManagerImplSpy).getNetwork(loadBalancerId); when(networkMock.getId()).thenReturn(networkId); - when(userVmMock.getId()).thenReturn(virtualMachineId); when(zoneMock.getNetworkType()).thenReturn(DataCenter.NetworkType.Advanced); when(userVmService.createAdvancedSecurityGroupVirtualMachine(any(), any(), any(), any(), any(), any(), 
any(), any(), any(), any(), any(), any(), any(), eq(userData), eq(userDataId), eq(userDataDetails.toString()), any(), any(), any(), any(), any(), any(), @@ -1321,9 +1322,9 @@ public class AutoScaleManagerImplTest { when(networkModel.checkSecurityGroupSupportForNetwork(account, zoneMock, List.of(networkId), Collections.emptyList())).thenReturn(true); - long result = autoScaleManagerImplSpy.createNewVM(asVmGroupMock); + UserVm result = autoScaleManagerImplSpy.createNewVM(asVmGroupMock); - Assert.assertEquals((long) virtualMachineId, result); + Assert.assertEquals(userVmMock, result); String vmHostNamePattern = autoScaleManagerImplSpy.VM_HOSTNAME_PREFIX + vmGroupName + "-" + asVmGroupMock.getNextVmSeq() + "-[a-z]{6}"; @@ -1363,7 +1364,6 @@ public class AutoScaleManagerImplTest { Mockito.doReturn(networkMock).when(autoScaleManagerImplSpy).getNetwork(loadBalancerId); when(networkMock.getId()).thenReturn(networkId); - when(userVmMock.getId()).thenReturn(virtualMachineId); when(zoneMock.getNetworkType()).thenReturn(DataCenter.NetworkType.Advanced); when(userVmService.createAdvancedVirtualMachine(any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), eq(userData), eq(userDataId), eq(userDataDetails.toString()), any(), any(), any(), eq(true), any(), any(), any(), @@ -1371,9 +1371,9 @@ public class AutoScaleManagerImplTest { when(networkModel.checkSecurityGroupSupportForNetwork(account, zoneMock, List.of(networkId), Collections.emptyList())).thenReturn(false); - long result = autoScaleManagerImplSpy.createNewVM(asVmGroupMock); + UserVm result = autoScaleManagerImplSpy.createNewVM(asVmGroupMock); - Assert.assertEquals((long) virtualMachineId, result); + Assert.assertEquals(userVmMock, result); String vmHostNamePattern = autoScaleManagerImplSpy.VM_HOSTNAME_PREFIX + vmGroupNameWithMaxLength.substring(0, 41) + "-" + asVmGroupMock.getNextVmSeq() + "-[a-z]{6}"; @@ -1503,18 +1503,21 @@ public class AutoScaleManagerImplTest { 
when(autoScaleVmGroupDao.updateState(vmGroupId, AutoScaleVmGroup.State.ENABLED, AutoScaleVmGroup.State.SCALING)).thenReturn(true); when(autoScaleVmGroupDao.updateState(vmGroupId, AutoScaleVmGroup.State.SCALING, AutoScaleVmGroup.State.ENABLED)).thenReturn(true); - Mockito.doReturn(virtualMachineId).when(autoScaleManagerImplSpy).createNewVM(asVmGroupMock); + Mockito.doReturn(userVmMock).when(autoScaleManagerImplSpy).createNewVM(asVmGroupMock); + when(userVmMock.getId()).thenReturn(virtualMachineId); when(asVmGroupMock.getLoadBalancerId()).thenReturn(loadBalancerId); when(lbVmMapDao.listByLoadBalancerId(loadBalancerId)).thenReturn(Arrays.asList(loadBalancerVMMapMock)); when(loadBalancerVMMapMock.getInstanceId()).thenReturn(virtualMachineId + 1); when(loadBalancingRulesService.assignToLoadBalancer(anyLong(), any(), any(), eq(true))).thenReturn(true); + Mockito.doReturn(new Pair>(userVmMock, null)).when(userVmMgr).startVirtualMachine(virtualMachineId, null, new HashMap<>(), null); autoScaleManagerImplSpy.doScaleUp(vmGroupId, 1); Mockito.verify(autoScaleManagerImplSpy).createNewVM(asVmGroupMock); Mockito.verify(loadBalancingRulesService).assignToLoadBalancer(anyLong(), any(), any(), eq(true)); + Mockito.verify(userVmMgr).startVirtualMachine(virtualMachineId, null, new HashMap<>(), null); } } @@ -1969,8 +1972,8 @@ public class AutoScaleManagerImplTest { autoScaleManagerImplSpy.processPerformanceMonitorAnswer(countersMap, countersNumberMap, groupTO, params, details); - Mockito.verify(autoScaleManagerImplSpy).updateCountersMapWithInstantData(any(), any(), eq(groupTO), eq(scaleUpCounterId), eq(scaleUpConditionId), eq(0L), eq(value1), eq(AutoScaleValueType.INSTANT_VM)); - Mockito.verify(autoScaleManagerImplSpy).updateCountersMapWithInstantData(any(), any(), eq(groupTO), eq(scaleDownCounterId), eq(scaleDownConditionId), eq(0L), eq(value2), eq(AutoScaleValueType.INSTANT_VM)); + Mockito.verify(autoScaleManagerImplSpy).updateCountersMapWithInstantData(any(), any(), eq(groupTO), 
eq(scaleUpCounterId), any(ConditionTO.class), any(AutoScalePolicyTO.class), eq(value1), eq(AutoScaleValueType.INSTANT_VM)); + Mockito.verify(autoScaleManagerImplSpy).updateCountersMapWithInstantData(any(), any(), eq(groupTO), eq(scaleDownCounterId), any(ConditionTO.class), any(AutoScalePolicyTO.class), eq(value2), eq(AutoScaleValueType.INSTANT_VM)); } @Test @@ -2109,6 +2112,11 @@ public class AutoScaleManagerImplTest { public void updateCountersMapWithInstantDataForMemory() { AutoScaleVmGroupTO groupTO = Mockito.mock(AutoScaleVmGroupTO.class); AutoScaleVmProfileTO profileTO = Mockito.mock(AutoScaleVmProfileTO.class); + AutoScalePolicyTO scaleUpPolicyTO = Mockito.mock(AutoScalePolicyTO.class); + ConditionTO conditionTO = Mockito.mock(ConditionTO.class); + + when(conditionTO.getId()).thenReturn(conditionId); + when(scaleUpPolicyTO.getId()).thenReturn(scaleUpPolicyId); when(counterDao.findById(counterId)).thenReturn(counterMock); when(counterMock.getSource()).thenReturn(Counter.Source.MEMORY); @@ -2127,7 +2135,7 @@ public class AutoScaleManagerImplTest { double value = 512; autoScaleManagerImplSpy.updateCountersMapWithInstantData(countersMap, countersNumberMap, - groupTO, counterId, conditionId, scaleUpPolicyId, value, AutoScaleValueType.INSTANT_VM); + groupTO, counterId, conditionTO, scaleUpPolicyTO, value, AutoScaleValueType.INSTANT_VM); Assert.assertEquals(1, countersMap.size()); Assert.assertEquals(1, countersNumberMap.size()); @@ -2140,7 +2148,11 @@ public class AutoScaleManagerImplTest { public void updateCountersMapWithInstantDataForCPU() { AutoScaleVmGroupTO groupTO = Mockito.mock(AutoScaleVmGroupTO.class); AutoScaleVmProfileTO profileTO = Mockito.mock(AutoScaleVmProfileTO.class); + AutoScalePolicyTO scaleUpPolicyTO = Mockito.mock(AutoScalePolicyTO.class); + ConditionTO conditionTO = Mockito.mock(ConditionTO.class); + when(conditionTO.getId()).thenReturn(conditionId); + when(scaleUpPolicyTO.getId()).thenReturn(scaleUpPolicyId); 
when(counterDao.findById(counterId)).thenReturn(counterMock); when(counterMock.getSource()).thenReturn(Counter.Source.CPU); @@ -2153,7 +2165,7 @@ public class AutoScaleManagerImplTest { double value = 0.5; autoScaleManagerImplSpy.updateCountersMapWithInstantData(countersMap, countersNumberMap, - groupTO, counterId, conditionId, scaleUpPolicyId, value, AutoScaleValueType.INSTANT_VM); + groupTO, counterId, conditionTO, scaleUpPolicyTO, value, AutoScaleValueType.INSTANT_VM); Assert.assertEquals(1, countersMap.size()); Assert.assertEquals(1, countersNumberMap.size()); @@ -2287,9 +2299,9 @@ public class AutoScaleManagerImplTest { vmStatsById.put(virtualMachineId, vmStats); Map> policyCountersMap = new HashMap<>(); - CounterTO counter1 = new CounterTO(counterId, counterName, Counter.Source.CPU, counterValue, counterProvider); - CounterTO counter2 = new CounterTO(counterId + 1, counterName, Counter.Source.MEMORY, counterValue, counterProvider); - CounterTO counter3 = new CounterTO(counterId + 2, counterName, Counter.Source.VIRTUALROUTER, counterValue, counterProvider); + CounterTO counter1 = new CounterTO(counterId, counterUuid, counterName, Counter.Source.CPU, counterValue, counterProvider); + CounterTO counter2 = new CounterTO(counterId + 1, counterUuid, counterName, Counter.Source.MEMORY, counterValue, counterProvider); + CounterTO counter3 = new CounterTO(counterId + 2, counterUuid, counterName, Counter.Source.VIRTUALROUTER, counterValue, counterProvider); policyCountersMap.put(scaleUpPolicyId, Arrays.asList(counter1, counter2, counter3)); when(asGroupStatisticsDao.persist(any())).thenReturn(Mockito.mock(AutoScaleVmGroupStatisticsVO.class)); diff --git a/server/src/test/java/com/cloud/network/element/VpcVirtualRouterElementTest.java b/server/src/test/java/com/cloud/network/element/VpcVirtualRouterElementTest.java index 8a5b965514c..20ddb39d943 100644 --- a/server/src/test/java/com/cloud/network/element/VpcVirtualRouterElementTest.java +++ 
b/server/src/test/java/com/cloud/network/element/VpcVirtualRouterElementTest.java @@ -23,6 +23,7 @@ import com.cloud.network.RemoteAccessVpn; import com.cloud.network.VpnUser; import com.cloud.network.router.VpcVirtualNetworkApplianceManagerImpl; import com.cloud.network.vpc.Vpc; +import com.cloud.network.vpc.dao.VpcDao; import com.cloud.utils.db.EntityManager; import com.cloud.vm.DomainRouterVO; import com.cloud.vm.dao.DomainRouterDao; @@ -51,7 +52,10 @@ import static org.mockito.Mockito.when; public class VpcVirtualRouterElementTest { @Mock DataCenterDao _dcDao; - @Mock private DomainRouterDao _routerDao; + @Mock + private DomainRouterDao _routerDao; + @Mock + VpcDao _vpcDao; @Mock EntityManager _entityMgr; diff --git a/server/src/test/java/com/cloud/projects/MockProjectManagerImpl.java b/server/src/test/java/com/cloud/projects/MockProjectManagerImpl.java index 182ad83c6ce..0abcf9591d4 100644 --- a/server/src/test/java/com/cloud/projects/MockProjectManagerImpl.java +++ b/server/src/test/java/com/cloud/projects/MockProjectManagerImpl.java @@ -179,7 +179,7 @@ public class MockProjectManagerImpl extends ManagerBase implements ProjectManage } @Override - public boolean deleteAccountFromProject(long projectId, long accountId) { + public boolean deleteAccountFromProject(long projectId, Account account) { // TODO Auto-generated method stub return false; } diff --git a/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java b/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java index 6aae7a091d3..e8b297ff188 100755 --- a/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java +++ b/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java @@ -620,7 +620,7 @@ public class MockResourceManagerImpl extends ManagerBase implements ResourceMana } @Override - public boolean isGPUDeviceAvailable(final long hostId, final String groupName, final String vgpuType) { + public boolean isGPUDeviceAvailable(final Host host, final 
String groupName, final String vgpuType) { // TODO Auto-generated method stub return false; } diff --git a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java index 666324d4ed2..9630b341bc9 100644 --- a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java +++ b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java @@ -1047,9 +1047,6 @@ public class VolumeApiServiceImplTest { volumeApiServiceImpl.cleanVolumesCache(volumeVoMock); - Mockito.verify(dataStoreMock1).getName(); - Mockito.verify(dataStoreMock2).getName(); - Mockito.verify(volumeInfoMock1).delete(); Mockito.verify(volumeInfoMock2).delete(); } diff --git a/server/src/test/java/com/cloud/storage/listener/StoragePoolMonitorTest.java b/server/src/test/java/com/cloud/storage/listener/StoragePoolMonitorTest.java index 3be6e02d04b..c700188a599 100644 --- a/server/src/test/java/com/cloud/storage/listener/StoragePoolMonitorTest.java +++ b/server/src/test/java/com/cloud/storage/listener/StoragePoolMonitorTest.java @@ -64,11 +64,11 @@ public class StoragePoolMonitorTest { Mockito.when(poolDao.listBy(nullable(Long.class), nullable(Long.class), nullable(Long.class), Mockito.any(ScopeType.class))).thenReturn(Collections.singletonList(pool)); Mockito.when(poolDao.findZoneWideStoragePoolsByTags(Mockito.anyLong(), Mockito.any(String[].class), Mockito.anyBoolean())).thenReturn(Collections.emptyList()); Mockito.when(poolDao.findZoneWideStoragePoolsByHypervisor(Mockito.anyLong(), Mockito.any(Hypervisor.HypervisorType.class))).thenReturn(Collections.emptyList()); - Mockito.doReturn(true).when(storageManager).connectHostToSharedPool(host.getId(), pool.getId()); + Mockito.doReturn(true).when(storageManager).connectHostToSharedPool(host, pool.getId()); storagePoolMonitor.processConnect(host, cmd, false); - Mockito.verify(storageManager, Mockito.times(1)).connectHostToSharedPool(Mockito.eq(host.getId()), 
Mockito.eq(pool.getId())); + Mockito.verify(storageManager, Mockito.times(1)).connectHostToSharedPool(Mockito.eq(host), Mockito.eq(pool.getId())); Mockito.verify(storageManager, Mockito.times(1)).createCapacityEntry(Mockito.eq(pool.getId())); } @@ -77,7 +77,7 @@ public class StoragePoolMonitorTest { Mockito.when(poolDao.listBy(Mockito.anyLong(), Mockito.anyLong(), Mockito.anyLong(), Mockito.any(ScopeType.class))).thenReturn(Collections.singletonList(pool)); Mockito.when(poolDao.findZoneWideStoragePoolsByTags(Mockito.anyLong(), Mockito.any(String[].class), Mockito.anyBoolean())).thenReturn(Collections.emptyList()); Mockito.when(poolDao.findZoneWideStoragePoolsByHypervisor(Mockito.anyLong(), Mockito.any(Hypervisor.HypervisorType.class))).thenReturn(Collections.emptyList()); - Mockito.doThrow(new StorageUnavailableException("unable to mount storage", 123L)).when(storageManager).connectHostToSharedPool(Mockito.anyLong(), Mockito.anyLong()); + Mockito.doThrow(new StorageUnavailableException("unable to mount storage", 123L)).when(storageManager).connectHostToSharedPool(Mockito.any(), Mockito.anyLong()); storagePoolMonitor.processConnect(host, cmd, false); } diff --git a/server/src/test/java/com/cloud/template/HypervisorTemplateAdapterTest.java b/server/src/test/java/com/cloud/template/HypervisorTemplateAdapterTest.java index 5307beb4aba..2a6d7af434a 100644 --- a/server/src/test/java/com/cloud/template/HypervisorTemplateAdapterTest.java +++ b/server/src/test/java/com/cloud/template/HypervisorTemplateAdapterTest.java @@ -451,12 +451,11 @@ public class HypervisorTemplateAdapterTest { DataCenterVO dataCenterVOMock = null; Mockito.when(_dcDao.findById(Mockito.anyLong())).thenReturn(dataCenterVOMock); - Mockito.when(dataStoreMock.getId()).thenReturn(2L); boolean result = _adapter.isZoneAndImageStoreAvailable(dataStoreMock, zoneId, zoneSet, isTemplatePrivate); - Mockito.verify(loggerMock, Mockito.times(1)).warn(String.format("Unable to find zone by id [%s], so skip downloading 
template to its image store [%s].", - zoneId, dataStoreMock.getId())); + Mockito.verify(loggerMock, Mockito.times(1)).warn("Unable to find zone by id [{}], so skip downloading template to its image store [{}].", + zoneId, dataStoreMock); Assert.assertFalse(result); } @@ -470,11 +469,10 @@ public class HypervisorTemplateAdapterTest { Mockito.when(_dcDao.findById(Mockito.anyLong())).thenReturn(dataCenterVOMock); Mockito.when(dataCenterVOMock.getAllocationState()).thenReturn(Grouping.AllocationState.Disabled); - Mockito.when(dataStoreMock.getId()).thenReturn(2L); boolean result = _adapter.isZoneAndImageStoreAvailable(dataStoreMock, zoneId, zoneSet, isTemplatePrivate); - Mockito.verify(loggerMock, Mockito.times(1)).info(String.format("Zone [%s] is disabled. Skip downloading template to its image store [%s].", zoneId, dataStoreMock.getId())); + Mockito.verify(loggerMock, Mockito.times(1)).info("Zone [{}] is disabled. Skip downloading template to its image store [{}].", dataCenterVOMock, dataStoreMock); Assert.assertFalse(result); } @@ -488,13 +486,12 @@ public class HypervisorTemplateAdapterTest { Mockito.when(_dcDao.findById(Mockito.anyLong())).thenReturn(dataCenterVOMock); Mockito.when(dataCenterVOMock.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); - Mockito.when(dataStoreMock.getId()).thenReturn(2L); Mockito.when(statsCollectorMock.imageStoreHasEnoughCapacity(any(DataStore.class))).thenReturn(false); boolean result = _adapter.isZoneAndImageStoreAvailable(dataStoreMock, zoneId, zoneSet, isTemplatePrivate); - Mockito.verify(loggerMock, times(1)).info(String.format("Image store doesn't have enough capacity. Skip downloading template to this image store [%s].", - dataStoreMock.getId())); + Mockito.verify(loggerMock, times(1)).info("Image store doesn't have enough capacity. 
Skip downloading template to this image store [{}].", + dataStoreMock); Assert.assertFalse(result); } diff --git a/server/src/test/java/com/cloud/user/AccountManagerImplTest.java b/server/src/test/java/com/cloud/user/AccountManagerImplTest.java index 3f66c6017a4..5739114cb48 100644 --- a/server/src/test/java/com/cloud/user/AccountManagerImplTest.java +++ b/server/src/test/java/com/cloud/user/AccountManagerImplTest.java @@ -177,11 +177,11 @@ public class AccountManagerImplTest extends AccountManagetImplTestBase { Mockito.when(_accountDao.findById(42l)).thenReturn(account); Mockito.doNothing().when(accountManagerImpl).checkAccess(Mockito.any(Account.class), Mockito.isNull(), Mockito.anyBoolean(), Mockito.any(Account.class)); Mockito.when(_accountDao.remove(42l)).thenReturn(true); - Mockito.when(_configMgr.releaseAccountSpecificVirtualRanges(42l)).thenReturn(true); + Mockito.when(_configMgr.releaseAccountSpecificVirtualRanges(account)).thenReturn(true); Mockito.lenient().when(_domainMgr.getDomain(Mockito.anyLong())).thenReturn(domain); Mockito.lenient().when(securityChecker.checkAccess(Mockito.any(Account.class), Mockito.any(Domain.class))).thenReturn(true); Mockito.when(_vmSnapshotDao.listByAccountId(Mockito.anyLong())).thenReturn(new ArrayList()); - Mockito.when(_autoscaleMgr.deleteAutoScaleVmGroupsByAccount(42l)).thenReturn(true); + Mockito.when(_autoscaleMgr.deleteAutoScaleVmGroupsByAccount(account)).thenReturn(true); List sshkeyList = new ArrayList(); SSHKeyPairVO sshkey = new SSHKeyPairVO(); @@ -205,7 +205,7 @@ public class AccountManagerImplTest extends AccountManagetImplTestBase { Mockito.when(_accountDao.findById(42l)).thenReturn(account); Mockito.doNothing().when(accountManagerImpl).checkAccess(Mockito.any(Account.class), Mockito.isNull(), Mockito.anyBoolean(), Mockito.any(Account.class)); Mockito.when(_accountDao.remove(42l)).thenReturn(true); - Mockito.when(_configMgr.releaseAccountSpecificVirtualRanges(42l)).thenReturn(true); + 
Mockito.when(_configMgr.releaseAccountSpecificVirtualRanges(account)).thenReturn(true); Mockito.when(_userVmDao.listByAccountId(42l)).thenReturn(Arrays.asList(Mockito.mock(UserVmVO.class))); Mockito.when(_vmMgr.expunge(Mockito.any(UserVmVO.class))).thenReturn(false); Mockito.lenient().when(_domainMgr.getDomain(Mockito.anyLong())).thenReturn(domain); @@ -676,7 +676,6 @@ public class AccountManagerImplTest extends AccountManagetImplTestBase { long userVoDuplicatedMockId = 67l; UserVO userVoDuplicatedMock = Mockito.mock(UserVO.class); - Mockito.doReturn(userName).when(userVoDuplicatedMock).getUsername(); Mockito.doReturn(userVoDuplicatedMockId).when(userVoDuplicatedMock).getId(); long accountIdUserDuplicated = 98l; diff --git a/server/src/test/java/com/cloud/user/DomainManagerImplTest.java b/server/src/test/java/com/cloud/user/DomainManagerImplTest.java index 39155986941..2d52f0aa52e 100644 --- a/server/src/test/java/com/cloud/user/DomainManagerImplTest.java +++ b/server/src/test/java/com/cloud/user/DomainManagerImplTest.java @@ -211,7 +211,7 @@ public class DomainManagerImplTest { @Test public void testDeleteDomainNoCleanup() { - Mockito.when(_configMgr.releaseDomainSpecificVirtualRanges(Mockito.anyLong())).thenReturn(true); + Mockito.when(_configMgr.releaseDomainSpecificVirtualRanges(Mockito.any())).thenReturn(true); domainManager.deleteDomain(DOMAIN_ID, testDomainCleanup); Mockito.verify(domainManager).deleteDomain(domain, testDomainCleanup); Mockito.verify(domainManager).removeDomainWithNoAccountsForCleanupNetworksOrDedicatedResources(domain); @@ -276,7 +276,7 @@ public class DomainManagerImplTest { Mockito.when(_accountDao.findCleanupsForRemovedAccounts(Mockito.anyLong())).thenReturn(new ArrayList()); Mockito.when(_dedicatedDao.listByDomainId(Mockito.anyLong())).thenReturn(new ArrayList()); Mockito.when(domainDaoMock.remove(Mockito.anyLong())).thenReturn(true); - Mockito.when(_configMgr.releaseDomainSpecificVirtualRanges(Mockito.anyLong())).thenReturn(true); + 
Mockito.when(_configMgr.releaseDomainSpecificVirtualRanges(Mockito.any())).thenReturn(true); try { Assert.assertTrue(domainManager.deleteDomain(20l, false)); @@ -307,7 +307,7 @@ public class DomainManagerImplTest { Mockito.when(domainDaoMock.remove(Mockito.anyLong())).thenReturn(true); Mockito.when(_resourceCountDao.removeEntriesByOwner(Mockito.anyLong(), Mockito.eq(ResourceOwnerType.Domain))).thenReturn(1l); Mockito.when(_resourceLimitDao.removeEntriesByOwner(Mockito.anyLong(), Mockito.eq(ResourceOwnerType.Domain))).thenReturn(1l); - Mockito.when(_configMgr.releaseDomainSpecificVirtualRanges(Mockito.anyLong())).thenReturn(true); + Mockito.when(_configMgr.releaseDomainSpecificVirtualRanges(Mockito.any())).thenReturn(true); try { Assert.assertTrue(domainManager.deleteDomain(20l, true)); diff --git a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java index d49dcd0f00c..af02c2e1d31 100644 --- a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java +++ b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java @@ -546,7 +546,7 @@ public class UserVmManagerImplTest { private void configureValidateOrReplaceMacAddressTest(int times, String macAddress, String expectedMacAddress) throws InsufficientAddressCapacityException { Mockito.when(networkModel.getNextAvailableMacAddressInNetwork(Mockito.anyLong())).thenReturn(expectedMacAddress); - String returnedMacAddress = userVmManagerImpl.validateOrReplaceMacAddress(macAddress, 1l); + String returnedMacAddress = userVmManagerImpl.validateOrReplaceMacAddress(macAddress, _networkMock); Mockito.verify(networkModel, Mockito.times(times)).getNextAvailableMacAddressInNetwork(Mockito.anyLong()); assertEquals(expectedMacAddress, returnedMacAddress); @@ -710,7 +710,6 @@ public class UserVmManagerImplTest { Mockito.when(newRootDiskOffering.getId()).thenReturn(diskOfferingId); Mockito.when(newRootDiskOffering.getMinIops()).thenReturn(offeringMinIops); 
Mockito.when(newRootDiskOffering.getMaxIops()).thenReturn(offeringMaxIops); - Mockito.when(newRootDiskOffering.getName()).thenReturn("OfferingName"); return newRootDiskOffering; } diff --git a/server/src/test/java/com/cloud/vm/UserVmManagerTest.java b/server/src/test/java/com/cloud/vm/UserVmManagerTest.java index 8be100d4570..1ed0a30af10 100644 --- a/server/src/test/java/com/cloud/vm/UserVmManagerTest.java +++ b/server/src/test/java/com/cloud/vm/UserVmManagerTest.java @@ -419,7 +419,7 @@ public class UserVmManagerTest { doReturn(VirtualMachine.State.Running).when(_vmInstance).getState(); //when(ApiDBUtils.getCpuOverprovisioningFactor()).thenReturn(3f); - when(_capacityMgr.checkIfHostHasCapacity(anyLong(), anyInt(), anyLong(), anyBoolean(), anyFloat(), anyFloat(), anyBoolean())).thenReturn(false); + when(_capacityMgr.checkIfHostHasCapacity(any(), anyInt(), anyLong(), anyBoolean(), anyFloat(), anyFloat(), anyBoolean())).thenReturn(false); when(_itMgr.reConfigureVm(_vmInstance.getUuid(), so2, so1, new HashMap(), false)).thenReturn(_vmInstance); doReturn(true).when(_itMgr).upgradeVmDb(anyLong(), so1, so2); @@ -772,7 +772,6 @@ public class UserVmManagerTest { @Test public void testApplyUserDataInNetworkWithoutUserDataSupport() throws Exception { UserVm userVm = mock(UserVm.class); - when(userVm.getId()).thenReturn(1L); when(_nicMock.getNetworkId()).thenReturn(2L); when(_networkMock.getNetworkOfferingId()).thenReturn(3L); diff --git a/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java b/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java index 440431086ee..06c917a1244 100644 --- a/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java +++ b/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java @@ -353,7 +353,7 @@ public class VMSnapshotManagerTest { verify(_vmSnapshotMgr).changeUserVmServiceOffering(userVm, vmSnapshotVO); verify(_vmSnapshotMgr).getVmMapDetails(userVm); - 
verify(_vmSnapshotMgr).upgradeUserVmServiceOffering(ArgumentMatchers.eq(TEST_VM_ID), ArgumentMatchers.eq(SERVICE_OFFERING_ID), mapDetailsCaptor.capture()); + verify(_vmSnapshotMgr).upgradeUserVmServiceOffering(ArgumentMatchers.eq(userVm), ArgumentMatchers.eq(SERVICE_OFFERING_ID), mapDetailsCaptor.capture()); } @Test @@ -371,7 +371,7 @@ public class VMSnapshotManagerTest { when(_userVmManager.upgradeVirtualMachine(ArgumentMatchers.eq(TEST_VM_ID), ArgumentMatchers.eq(SERVICE_OFFERING_ID), mapDetailsCaptor.capture())).thenReturn(true); _vmSnapshotMgr.changeUserVmServiceOffering(userVm, vmSnapshotVO); verify(_vmSnapshotMgr).getVmMapDetails(userVm); - verify(_vmSnapshotMgr).upgradeUserVmServiceOffering(ArgumentMatchers.eq(TEST_VM_ID), ArgumentMatchers.eq(SERVICE_OFFERING_ID), mapDetailsCaptor.capture()); + verify(_vmSnapshotMgr).upgradeUserVmServiceOffering(ArgumentMatchers.eq(userVm), ArgumentMatchers.eq(SERVICE_OFFERING_ID), mapDetailsCaptor.capture()); } @Test(expected=CloudRuntimeException.class) @@ -379,7 +379,7 @@ public class VMSnapshotManagerTest { when(_userVmManager.upgradeVirtualMachine(ArgumentMatchers.eq(TEST_VM_ID), ArgumentMatchers.eq(SERVICE_OFFERING_ID), mapDetailsCaptor.capture())).thenReturn(false); _vmSnapshotMgr.changeUserVmServiceOffering(userVm, vmSnapshotVO); verify(_vmSnapshotMgr).getVmMapDetails(userVm); - verify(_vmSnapshotMgr).upgradeUserVmServiceOffering(ArgumentMatchers.eq(TEST_VM_ID), ArgumentMatchers.eq(SERVICE_OFFERING_ID), mapDetailsCaptor.capture()); + verify(_vmSnapshotMgr).upgradeUserVmServiceOffering(ArgumentMatchers.eq(userVm), ArgumentMatchers.eq(SERVICE_OFFERING_ID), mapDetailsCaptor.capture()); } @Test @@ -389,7 +389,7 @@ public class VMSnapshotManagerTest { put(userVmDetailMemory.getName(), userVmDetailMemory.getValue()); }}; when(_userVmManager.upgradeVirtualMachine(TEST_VM_ID, SERVICE_OFFERING_ID, details)).thenReturn(true); - _vmSnapshotMgr.upgradeUserVmServiceOffering(TEST_VM_ID, SERVICE_OFFERING_ID, details); + 
_vmSnapshotMgr.upgradeUserVmServiceOffering(userVm, SERVICE_OFFERING_ID, details); verify(_userVmManager).upgradeVirtualMachine(TEST_VM_ID, SERVICE_OFFERING_ID, details); } diff --git a/server/src/test/java/com/cloud/vpc/MockConfigurationManagerImpl.java b/server/src/test/java/com/cloud/vpc/MockConfigurationManagerImpl.java index 8f05b716725..cdd23b0ccc2 100644 --- a/server/src/test/java/com/cloud/vpc/MockConfigurationManagerImpl.java +++ b/server/src/test/java/com/cloud/vpc/MockConfigurationManagerImpl.java @@ -577,7 +577,7 @@ public class MockConfigurationManagerImpl extends ManagerBase implements Configu * @see com.cloud.configuration.ConfigurationManager#releaseDomainSpecificVirtualRanges(long) */ @Override - public boolean releaseDomainSpecificVirtualRanges(long domainId) { + public boolean releaseDomainSpecificVirtualRanges(Domain domain) { // TODO Auto-generated method stub return false; } @@ -586,7 +586,7 @@ public class MockConfigurationManagerImpl extends ManagerBase implements Configu * @see com.cloud.configuration.ConfigurationManager#releaseAccountSpecificVirtualRanges(long) */ @Override - public boolean releaseAccountSpecificVirtualRanges(long accountId) { + public boolean releaseAccountSpecificVirtualRanges(Account account) { // TODO Auto-generated method stub return false; } diff --git a/server/src/test/java/org/apache/cloudstack/cluster/ClusterDrsServiceImplTest.java b/server/src/test/java/org/apache/cloudstack/cluster/ClusterDrsServiceImplTest.java index cca6d673dfe..81aac9e4b54 100644 --- a/server/src/test/java/org/apache/cloudstack/cluster/ClusterDrsServiceImplTest.java +++ b/server/src/test/java/org/apache/cloudstack/cluster/ClusterDrsServiceImplTest.java @@ -199,7 +199,7 @@ public class ClusterDrsServiceImplTest { Mockito.when(hostDao.findByClusterId(1L)).thenReturn(hostList); Mockito.when(vmInstanceDao.listByClusterId(1L)).thenReturn(vmList); - Mockito.when(balancedAlgorithm.needsDrs(Mockito.anyLong(), Mockito.anyList(), 
Mockito.anyList())).thenReturn( + Mockito.when(balancedAlgorithm.needsDrs(Mockito.any(), Mockito.anyList(), Mockito.anyList())).thenReturn( true, false); Mockito.when( clusterDrsService.getBestMigration(Mockito.any(Cluster.class), Mockito.any(ClusterDrsAlgorithm.class), @@ -213,7 +213,7 @@ public class ClusterDrsServiceImplTest { Mockito.verify(hostDao, Mockito.times(1)).findByClusterId(1L); Mockito.verify(vmInstanceDao, Mockito.times(1)).listByClusterId(1L); - Mockito.verify(balancedAlgorithm, Mockito.times(2)).needsDrs(Mockito.anyLong(), Mockito.anyList(), + Mockito.verify(balancedAlgorithm, Mockito.times(2)).needsDrs(Mockito.any(), Mockito.anyList(), Mockito.anyList()); assertEquals(1, iterations.size()); @@ -391,10 +391,10 @@ public class ClusterDrsServiceImplTest { new Ternary<>(new Pair<>(List.of(destHost), 1), List.of(destHost), Map.of(destHost, false))); Mockito.when(managementServer.listHostsForMigrationOfVM(vm2, 0L, 500L, null, vmList)).thenReturn( new Ternary<>(new Pair<>(List.of(destHost), 1), List.of(destHost), Map.of(destHost, false))); - Mockito.when(balancedAlgorithm.getMetrics(cluster.getId(), vm1, serviceOffering, destHost, new HashMap<>(), + Mockito.when(balancedAlgorithm.getMetrics(cluster, vm1, serviceOffering, destHost, new HashMap<>(), new HashMap<>(), false)).thenReturn(new Ternary<>(1.0, 0.5, 1.5)); - Mockito.when(balancedAlgorithm.getMetrics(cluster.getId(), vm2, serviceOffering, destHost, new HashMap<>(), + Mockito.when(balancedAlgorithm.getMetrics(cluster, vm2, serviceOffering, destHost, new HashMap<>(), new HashMap<>(), false)).thenReturn(new Ternary<>(1.0, 2.5, 1.5)); Pair bestMigration = clusterDrsService.getBestMigration(cluster, balancedAlgorithm, diff --git a/server/src/test/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinitionTest.java b/server/src/test/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinitionTest.java index b5c842b8806..0b4c48e99b4 100644 --- 
a/server/src/test/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinitionTest.java +++ b/server/src/test/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinitionTest.java @@ -32,6 +32,7 @@ import com.cloud.network.VirtualRouterProvider.Type; import com.cloud.network.addr.PublicIp; import com.cloud.network.dao.NetworkVO; import com.cloud.network.dao.NsxProviderDao; +import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.dao.PhysicalNetworkServiceProviderVO; import com.cloud.network.element.VirtualRouterProviderVO; import com.cloud.network.router.VirtualRouter.Role; @@ -75,6 +76,8 @@ public class RouterDeploymentDefinitionTest extends RouterDeploymentDefinitionTe @Mock protected NetworkVO mockNw; @Mock + PhysicalNetworkDao physicalNetworkDao; + @Mock protected NsxProviderDao nsxProviderDao; protected RouterDeploymentDefinition deployment; @@ -101,6 +104,7 @@ public class RouterDeploymentDefinitionTest extends RouterDeploymentDefinitionTe .setAccountOwner(mockOwner) .setParams(params) .build(); + deployment.pNtwkDao = physicalNetworkDao; } @Test @@ -607,6 +611,8 @@ public class RouterDeploymentDefinitionTest extends RouterDeploymentDefinitionTe @Test(expected = CloudRuntimeException.class) public void testFindVirtualProviderWithNullPhyNwSrvProvider() { // Prepare + + when(physicalNetworkDao.findById(PHYSICAL_NW_ID)).thenReturn(null); when(mockNetworkModel.getPhysicalNetworkId(deployment.guestNetwork)).thenReturn(PHYSICAL_NW_ID); final Type type = Type.VirtualRouter; when(physicalProviderDao.findByServiceProvider(PHYSICAL_NW_ID, type.toString())) diff --git a/server/src/test/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelperTest.java b/server/src/test/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelperTest.java index d7684b824e3..272e79fea49 100644 --- a/server/src/test/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelperTest.java +++ 
b/server/src/test/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelperTest.java @@ -76,8 +76,6 @@ public class HeuristicRuleHelperTest { DataStore result = heuristicRuleHelperSpy.getImageStoreIfThereIsHeuristicRule(zoneId, HeuristicType.TEMPLATE, null); - Mockito.verify(loggerMock, Mockito.times(1)).debug(String.format("No heuristic rules found for zone with ID [%s] and heuristic type [%s]. Returning null.", - zoneId, HeuristicType.TEMPLATE)); Assert.assertNull(result); } @@ -92,7 +90,6 @@ public class HeuristicRuleHelperTest { DataStore result = heuristicRuleHelperSpy.getImageStoreIfThereIsHeuristicRule(zoneId, HeuristicType.TEMPLATE, null); - Mockito.verify(loggerMock, Mockito.times(1)).debug(String.format("Found the heuristic rule %s to apply for zone with ID [%s].", heuristicVOMock, zoneId)); Assert.assertNull(result); } diff --git a/server/src/test/java/org/apache/cloudstack/storage/volume/VolumeImportUnmanageManagerImplTest.java b/server/src/test/java/org/apache/cloudstack/storage/volume/VolumeImportUnmanageManagerImplTest.java index dab46595438..8982034a8c5 100644 --- a/server/src/test/java/org/apache/cloudstack/storage/volume/VolumeImportUnmanageManagerImplTest.java +++ b/server/src/test/java/org/apache/cloudstack/storage/volume/VolumeImportUnmanageManagerImplTest.java @@ -396,7 +396,7 @@ public class VolumeImportUnmanageManagerImplTest { volumeImportUnmanageManager.unmanageVolume(volumeId); Assert.fail("it should fail"); } catch (CloudRuntimeException ex) { - verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Volume (ID: %s) is not ready", volumeId)); + verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Volume %s is not ready", volumeVO)); } } @@ -409,7 +409,7 @@ public class VolumeImportUnmanageManagerImplTest { volumeImportUnmanageManager.unmanageVolume(volumeId); Assert.fail("it should fail"); } catch (CloudRuntimeException ex) { - 
verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Volume (ID: %s) is encrypted", volumeId)); + verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Volume %s is encrypted", volumeVO)); } } @@ -421,7 +421,7 @@ public class VolumeImportUnmanageManagerImplTest { volumeImportUnmanageManager.unmanageVolume(volumeId); Assert.fail("it should fail"); } catch (CloudRuntimeException ex) { - verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Volume (ID: %s) is attached to VM (ID: %s)", volumeId, volumeVO.getInstanceId())); + verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Volume %s is attached to VM (ID: %s)", volumeVO, volumeVO.getInstanceId())); } } @@ -444,7 +444,7 @@ public class VolumeImportUnmanageManagerImplTest { volumeImportUnmanageManager.checkIfPoolAvailable(poolId); Assert.fail("it should fail"); } catch (CloudRuntimeException ex) { - verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Storage pool (name: %s) is in maintenance", storagePoolName)); + verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Storage pool %s is in maintenance", storagePoolVO)); } } @@ -457,7 +457,7 @@ public class VolumeImportUnmanageManagerImplTest { volumeImportUnmanageManager.checkIfPoolAvailable(poolId); Assert.fail("it should fail"); } catch (CloudRuntimeException ex) { - verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Storage pool (ID: %s) is not Up: %s", storagePoolName, StoragePoolStatus.Disabled)); + verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Storage pool %s is not Up: %s", storagePoolVO, StoragePoolStatus.Disabled)); } } @@ -535,7 +535,7 @@ public class VolumeImportUnmanageManagerImplTest { volumeImportUnmanageManager.getOrCreateDiskOffering(account, diskOfferingId, zoneId, isLocal); Assert.fail("it should fail"); } catch (CloudRuntimeException ex) 
{ - verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Disk offering with ID %s is not active", diskOfferingId)); + verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Disk offering %s is not active", diskOfferingVO)); } } @@ -549,7 +549,7 @@ public class VolumeImportUnmanageManagerImplTest { volumeImportUnmanageManager.getOrCreateDiskOffering(account, diskOfferingId, zoneId, isLocal); Assert.fail("it should fail"); } catch (CloudRuntimeException ex) { - verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Disk offering with ID %s should use %s storage", diskOfferingId, isLocal ? "local" : "shared")); + verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Disk offering %s should use %s storage", diskOfferingVO, isLocal ? "local" : "shared")); } } @@ -564,7 +564,7 @@ public class VolumeImportUnmanageManagerImplTest { volumeImportUnmanageManager.getOrCreateDiskOffering(account, diskOfferingId, zoneId, isLocal); Assert.fail("it should fail"); } catch (CloudRuntimeException ex) { - verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Disk offering with ID %s should not support volume encryption", diskOfferingId)); + verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Disk offering %s should not support volume encryption", diskOfferingVO)); } } @@ -579,7 +579,7 @@ public class VolumeImportUnmanageManagerImplTest { volumeImportUnmanageManager.getOrCreateDiskOffering(account, diskOfferingId, zoneId, isLocal); Assert.fail("it should fail"); } catch (CloudRuntimeException ex) { - verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Disk offering with ID %s is not accessible by owner %s", diskOfferingId, account)); + verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Disk offering %s is not accessible by owner %s", diskOfferingVO, account)); } } diff --git 
a/server/src/test/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImplTest.java b/server/src/test/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImplTest.java index 9205e8cb2c7..92681e058a2 100644 --- a/server/src/test/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImplTest.java +++ b/server/src/test/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImplTest.java @@ -636,7 +636,6 @@ public class UnmanagedVMsManagerImplTest { when(convertHost.getStatus()).thenReturn(Status.Up); when(convertHost.getResourceState()).thenReturn(ResourceState.Enabled); when(convertHost.getId()).thenReturn(convertHostId); - when(convertHost.getName()).thenReturn("KVM-Convert-Host"); when(convertHost.getType()).thenReturn(Host.Type.Routing); when(convertHost.getDataCenterId()).thenReturn(zoneId); when(convertHost.getClusterId()).thenReturn(clusterId); diff --git a/server/src/test/java/org/apache/cloudstack/vm/schedule/VMSchedulerImplTest.java b/server/src/test/java/org/apache/cloudstack/vm/schedule/VMSchedulerImplTest.java index cad36b962ac..c51f07e96f7 100644 --- a/server/src/test/java/org/apache/cloudstack/vm/schedule/VMSchedulerImplTest.java +++ b/server/src/test/java/org/apache/cloudstack/vm/schedule/VMSchedulerImplTest.java @@ -128,7 +128,7 @@ public class VMSchedulerImplTest { actionEventUtilsMocked.verify(() -> ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, vm.getAccountId(), null, actionEventMap.get(action), true, - String.format("Executing action (%s) for VM Id:%s", vmScheduledJob.getAction(), vm.getUuid()), + String.format("Executing action (%s) for VM: %s", vmScheduledJob.getAction(), vm), vm.getId(), ApiCommandResourceType.VirtualMachine.toString(), 0)); Assert.assertEquals(expectedValue, jobId); } diff --git a/usage/src/main/java/com/cloud/usage/parser/BucketUsageParser.java b/usage/src/main/java/com/cloud/usage/parser/BucketUsageParser.java index 9617591ae76..e8686e95ea0 100644 --- a/usage/src/main/java/com/cloud/usage/parser/BucketUsageParser.java +++ 
b/usage/src/main/java/com/cloud/usage/parser/BucketUsageParser.java @@ -52,7 +52,7 @@ public class BucketUsageParser { public static boolean parse(AccountVO account, Date startDate, Date endDate) { if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Parsing all Bucket usage events for account: " + account.getId()); + LOGGER.debug("Parsing all Bucket usage events for account {}", account); } if ((endDate == null) || endDate.after(new Date())) { diff --git a/usage/src/main/java/com/cloud/usage/parser/NetworksUsageParser.java b/usage/src/main/java/com/cloud/usage/parser/NetworksUsageParser.java index a27e2ba226e..39a51e8554f 100644 --- a/usage/src/main/java/com/cloud/usage/parser/NetworksUsageParser.java +++ b/usage/src/main/java/com/cloud/usage/parser/NetworksUsageParser.java @@ -53,7 +53,7 @@ public class NetworksUsageParser { } public static boolean parse(AccountVO account, Date startDate, Date endDate) { - LOGGER.debug(String.format("Parsing all networks usage events for account [%s].", account.getId())); + LOGGER.debug("Parsing all networks usage events for account {}", account); if ((endDate == null) || endDate.after(new Date())) { endDate = new Date(); } @@ -84,7 +84,7 @@ public class NetworksUsageParser { long networkId = usageNetwork.getNetworkId(); long networkOfferingId = usageNetwork.getNetworkOfferingId(); LOGGER.debug(String.format("Creating network usage record with id [%s], network offering [%s], usage [%s], startDate [%s], and endDate [%s], for account [%s].", - networkId, networkOfferingId, usageDisplay, startDate, endDate, account.getId())); + networkId, networkOfferingId, usageDisplay, startDate, endDate, account)); String description = String.format("Network usage for network ID: %d, network offering: %d", usageNetwork.getNetworkId(), usageNetwork.getNetworkOfferingId()); UsageVO usageRecord = diff --git a/usage/src/main/java/com/cloud/usage/parser/VpcUsageParser.java b/usage/src/main/java/com/cloud/usage/parser/VpcUsageParser.java index 
5dcb5d08a6c..794e066c897 100644 --- a/usage/src/main/java/com/cloud/usage/parser/VpcUsageParser.java +++ b/usage/src/main/java/com/cloud/usage/parser/VpcUsageParser.java @@ -50,7 +50,7 @@ public class VpcUsageParser { } public static boolean parse(AccountVO account, Date startDate, Date endDate) { - LOGGER.debug(String.format("Parsing all VPC usage events for account [%s].", account.getId())); + LOGGER.debug("Parsing all VPC usage events for account {}", account); if ((endDate == null) || endDate.after(new Date())) { endDate = new Date(); } @@ -79,8 +79,8 @@ public class VpcUsageParser { String usageDisplay = dFormat.format(usage); long vpcId = usageVPC.getVpcId(); - LOGGER.debug(String.format("Creating VPC usage record with id [%s], usage [%s], startDate [%s], and endDate [%s], for account [%s].", - vpcId, usageDisplay, startDate, endDate, account.getId())); + LOGGER.debug("Creating VPC usage record with id [{}], usage [{}], startDate [{}], and endDate [{}], for account [{}].", + vpcId, usageDisplay, startDate, endDate, account); String description = String.format("VPC usage for VPC ID: %d", usageVPC.getVpcId()); UsageVO usageRecord = From 9bc283e5c265f4c7714249a7cee046aadfe065c6 Mon Sep 17 00:00:00 2001 From: Harikrishna Date: Tue, 7 Jan 2025 17:06:32 +0530 Subject: [PATCH 2/5] Introducing granular command timeouts global setting (#9659) * Introducing granular command timeouts global setting * fix marvin tests * Fixed log messages * some more log message fix * Fix empty value setting * Converted the global setting to non-dynamic * set wait on command only when granular wait is defined. 
This is to keep the backward compatibility * Improve error logging --- .../java/com/cloud/agent/AgentManager.java | 4 + .../cloud/agent/manager/AgentManagerImpl.java | 79 ++++++- .../agent/manager/AgentManagerImplTest.java | 20 ++ .../ConfigurationManagerImpl.java | 74 ++++++ .../ConfigurationManagerTest.java | 218 ++++++++++++------ .../integration/smoke/test_global_settings.py | 49 ++++ 6 files changed, 370 insertions(+), 74 deletions(-) diff --git a/engine/components-api/src/main/java/com/cloud/agent/AgentManager.java b/engine/components-api/src/main/java/com/cloud/agent/AgentManager.java index 2182dfc542d..81525ca13f1 100644 --- a/engine/components-api/src/main/java/com/cloud/agent/AgentManager.java +++ b/engine/components-api/src/main/java/com/cloud/agent/AgentManager.java @@ -50,6 +50,10 @@ public interface AgentManager { ConfigKey ReadyCommandWait = new ConfigKey("Advanced", Integer.class, "ready.command.wait", "60", "Time in seconds to wait for Ready command to return", true); + ConfigKey GranularWaitTimeForCommands = new ConfigKey<>("Advanced", String.class, "commands.timeout", "", + "This timeout overrides the wait global config. This holds a comma separated key value pairs containing timeout (in seconds) for specific commands. 
" + + "For example: DhcpEntryCommand=600, SavePasswordCommand=300, VmDataCommand=300", false); + public enum TapAgentsAction { Add, Del, Contains, } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java index 9333410e0aa..63e97519534 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java @@ -53,6 +53,7 @@ import org.apache.cloudstack.framework.jobs.AsyncJobExecutionContext; import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.outofbandmanagement.dao.OutOfBandManagementDao; import org.apache.cloudstack.utils.identity.ManagementServerNode; +import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.BooleanUtils; import com.cloud.agent.AgentManager; @@ -139,6 +140,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl protected List> _cmdMonitors = new ArrayList>(17); protected List> _creationMonitors = new ArrayList>(17); protected List _loadingAgents = new ArrayList(); + protected Map _commandTimeouts = new HashMap<>(); private int _monitorId = 0; private final Lock _agentStatusLock = new ReentrantLock(); @@ -241,6 +243,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl _monitorExecutor = new ScheduledThreadPoolExecutor(1, new NamedThreadFactory("AgentMonitor")); + initializeCommandTimeouts(); + return true; } @@ -424,6 +428,62 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } + protected int getTimeout(final Commands commands, int timeout) { + int result; + if (timeout > 0) { + result = timeout; + } else { + result = Wait.value(); + } + + int granularTimeout = getTimeoutFromGranularWaitTime(commands); + return (granularTimeout > 0) ? 
granularTimeout : result; + } + + protected int getTimeoutFromGranularWaitTime(final Commands commands) { + int maxWait = 0; + if (MapUtils.isNotEmpty(_commandTimeouts)) { + for (final Command cmd : commands) { + String simpleCommandName = cmd.getClass().getSimpleName(); + Integer commandTimeout = _commandTimeouts.get(simpleCommandName); + if (commandTimeout != null && commandTimeout > maxWait) { + maxWait = commandTimeout; + } + } + } + + return maxWait; + } + + private void initializeCommandTimeouts() { + String commandWaits = GranularWaitTimeForCommands.value().trim(); + if (StringUtils.isNotEmpty(commandWaits)) { + _commandTimeouts = getCommandTimeoutsMap(commandWaits); + logger.info(String.format("Timeouts for management server internal commands successfully initialized from global setting commands.timeout: %s", _commandTimeouts)); + } + } + + private Map getCommandTimeoutsMap(String commandWaits) { + String[] commandPairs = commandWaits.split(","); + Map commandTimeouts = new HashMap<>(); + + for (String commandPair : commandPairs) { + String[] parts = commandPair.trim().split("="); + if (parts.length == 2) { + try { + String commandName = parts[0].trim(); + int commandTimeout = Integer.parseInt(parts[1].trim()); + commandTimeouts.put(commandName, commandTimeout); + } catch (NumberFormatException e) { + logger.error(String.format("Initialising the timeouts using commands.timeout: %s for management server internal commands failed with error %s", commandPair, e.getMessage())); + } + } else { + logger.error(String.format("Error initialising the timeouts for management server internal commands. Invalid format in commands.timeout: %s", commandPair)); + } + } + return commandTimeouts; + } + @Override public Answer[] send(final Long hostId, final Commands commands, int timeout) throws AgentUnavailableException, OperationTimedoutException { assert hostId != null : "Who's not checking the agent id before sending? ... 
(finger wagging)"; @@ -431,8 +491,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl throw new AgentUnavailableException(-1); } - if (timeout <= 0) { - timeout = Wait.value(); + int wait = getTimeout(commands, timeout); + logger.debug(String.format("Wait time setting on %s is %d seconds", commands, wait)); + for (Command cmd : commands) { + String simpleCommandName = cmd.getClass().getSimpleName(); + Integer commandTimeout = _commandTimeouts.get(simpleCommandName); + if (commandTimeout != null) { + cmd.setWait(wait); + } } if (CheckTxnBeforeSending.value()) { @@ -454,7 +520,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl final Request req = new Request(hostId, agent.getName(), _nodeId, cmds, commands.stopOnError(), true); req.setSequence(agent.getNextSequence()); - final Answer[] answers = agent.send(req, timeout); + final Answer[] answers = agent.send(req, wait); notifyAnswersToMonitors(hostId, req.getSequence(), answers); commands.setAnswers(answers); return answers; @@ -997,6 +1063,11 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override public Answer[] send(final Long hostId, final Commands cmds) throws AgentUnavailableException, OperationTimedoutException { int wait = 0; + if (cmds.size() > 1) { + logger.debug(String.format("Checking the wait time in seconds to be used for the following commands : %s. 
If there are multiple commands sent at once," + + "then max wait time of those will be used", cmds)); + } + for (final Command cmd : cmds) { if (cmd.getWait() > wait) { wait = cmd.getWait(); @@ -1821,7 +1892,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override public ConfigKey[] getConfigKeys() { return new ConfigKey[] { CheckTxnBeforeSending, Workers, Port, Wait, AlertWait, DirectAgentLoadSize, - DirectAgentPoolSize, DirectAgentThreadCap, EnableKVMAutoEnableDisable, ReadyCommandWait }; + DirectAgentPoolSize, DirectAgentThreadCap, EnableKVMAutoEnableDisable, ReadyCommandWait, GranularWaitTimeForCommands }; } protected class SetHostParamsListener implements Listener { diff --git a/engine/orchestration/src/test/java/com/cloud/agent/manager/AgentManagerImplTest.java b/engine/orchestration/src/test/java/com/cloud/agent/manager/AgentManagerImplTest.java index 452cfd90056..52b7ed77533 100644 --- a/engine/orchestration/src/test/java/com/cloud/agent/manager/AgentManagerImplTest.java +++ b/engine/orchestration/src/test/java/com/cloud/agent/manager/AgentManagerImplTest.java @@ -83,4 +83,24 @@ public class AgentManagerImplTest { } Mockito.verify(mgr, Mockito.times(1)).handleDisconnectWithoutInvestigation(Mockito.any(attache.getClass()), Mockito.eq(Status.Event.AgentDisconnected), Mockito.eq(true), Mockito.eq(true)); } + + @Test + public void testGetTimeoutWithPositiveTimeout() { + Commands commands = Mockito.mock(Commands.class); + int timeout = 30; + int result = mgr.getTimeout(commands, timeout); + + Assert.assertEquals(30, result); + } + + @Test + public void testGetTimeoutWithGranularTimeout() { + Commands commands = Mockito.mock(Commands.class); + Mockito.doReturn(50).when(mgr).getTimeoutFromGranularWaitTime(commands); + + int timeout = 0; + int result = mgr.getTimeout(commands, timeout); + + Assert.assertEquals(50, result); + } } diff --git a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java 
b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java index dee1aa81758..d7e2160ef35 100644 --- a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java @@ -1245,6 +1245,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati type = configuration.getType(); } + validateSpecificConfigurationValues(name, value, type); + boolean isTypeValid = validateValueType(value, type); if (!isTypeValid) { return String.format("Value [%s] is not a valid [%s].", value, type); @@ -1373,6 +1375,78 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati return validateIfStringValueIsInRange(name, value, range); } + /** + * Validates configuration values for the given name, value, and type. + *
    + * <ul>
+ *   <li>The value must be a comma-separated list of key-value pairs, where each value must be a positive integer.</li>
+ *   <li>Each key-value pair must be in the format "command=value", with the value being a positive integer greater than 0,
+ * otherwise fails with an error message</li>
+ *   <li>Throws an {@link InvalidParameterValueException} if validation fails.</li>
+ * </ul>
+ * + * @param name the configuration name + * @param value the configuration value as a comma-separated string of key-value pairs + * @param type the configuration type, expected to be String + * @throws InvalidParameterValueException if validation fails with a specific error message + */ + protected void validateSpecificConfigurationValues(String name, String value, Class type) { + if (type.equals(String.class)) { + if (name.equals(AgentManager.GranularWaitTimeForCommands.toString())) { + Pair validationResult = validateCommaSeparatedKeyValueConfigWithPositiveIntegerValues(value); + if (!validationResult.first()) { + String errMsg = validationResult.second(); + logger.error(validationResult.second()); + throw new InvalidParameterValueException(errMsg); + } + } + } + } + + protected Pair validateCommaSeparatedKeyValueConfigWithPositiveIntegerValues(String value) { + try { + if (StringUtils.isNotEmpty(value)) { + String[] commands = value.split(","); + for (String command : commands) { + command = command.trim(); + if (!command.contains("=")) { + String errorMessage = String.format("Validation failed: Command '%s' does not contain '='.", command); + return new Pair<>(false, errorMessage); + } + + String[] parts = command.split("="); + if (parts.length != 2) { + String errorMessage = String.format("Validation failed: Command '%s' is not properly formatted.", command); + return new Pair<>(false, errorMessage); + } + + String commandName = parts[0].trim(); + String valueString = parts[1].trim(); + + if (commandName.isEmpty()) { + String errorMessage = String.format("Validation failed: Command name is missing in '%s'.", command); + return new Pair<>(false, errorMessage); + } + + try { + int num = Integer.parseInt(valueString); + if (num <= 0) { + String errorMessage = String.format("Validation failed: The value for command '%s' is not greater than 0. 
Invalid value: %d", commandName, num); + return new Pair<>(false, errorMessage); + } + } catch (NumberFormatException e) { + String errorMessage = String.format("Validation failed: The value for command '%s' is not a valid integer. Invalid value: %s", commandName, valueString); + return new Pair<>(false, errorMessage); + } + } + } + + return new Pair<>(true, ""); + } catch (Exception e) { + String errorMessage = String.format("Validation failed: An error occurred while parsing the command string. Error: %s", e.getMessage()); + return new Pair<>(false, errorMessage); + } + } + /** * Returns a boolean indicating whether a Config's range should be validated. It should not be validated when:
*
    diff --git a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java index ceffe019377..badf730a061 100644 --- a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java +++ b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java @@ -17,6 +17,7 @@ package com.cloud.configuration; +import com.cloud.agent.AgentManager; import com.cloud.api.query.dao.NetworkOfferingJoinDao; import com.cloud.api.query.vo.NetworkOfferingJoinVO; import com.cloud.configuration.Resource.ResourceType; @@ -71,6 +72,7 @@ import com.cloud.user.ResourceLimitService; import com.cloud.user.User; import com.cloud.user.UserVO; import com.cloud.user.dao.AccountDao; +import com.cloud.utils.Pair; import com.cloud.utils.db.Filter; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.TransactionLegacy; @@ -124,7 +126,10 @@ import java.util.Set; import java.util.UUID; import static org.hamcrest.CoreMatchers.is; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyInt; @@ -361,7 +366,7 @@ public class ConfigurationManagerTest { try { configurationMgr.dedicatePublicIpRange(dedicatePublicIpRangesCmd); } catch (Exception e) { - Assert.assertTrue(e.getMessage().contains("Unable to find vlan by id")); + assertTrue(e.getMessage().contains("Unable to find vlan by id")); } finally { txn.close("runDedicatePublicIpRangeInvalidRange"); } @@ -390,7 +395,7 @@ public class ConfigurationManagerTest { try { configurationMgr.dedicatePublicIpRange(dedicatePublicIpRangesCmd); } catch (Exception e) { - Assert.assertTrue(e.getMessage().contains("Public IP range has already been dedicated")); + 
assertTrue(e.getMessage().contains("Public IP range has already been dedicated")); } finally { txn.close("runDedicatePublicIpRangePublicIpRangeDedicated"); } @@ -416,7 +421,7 @@ public class ConfigurationManagerTest { try { configurationMgr.dedicatePublicIpRange(dedicatePublicIpRangesCmd); } catch (Exception e) { - Assert.assertTrue(e.getMessage().contains("Public IP range can be dedicated to an account only in the zone of type Advanced")); + assertTrue(e.getMessage().contains("Public IP range can be dedicated to an account only in the zone of type Advanced")); } finally { txn.close("runDedicatePublicIpRangeInvalidZone"); } @@ -443,7 +448,7 @@ public class ConfigurationManagerTest { try { configurationMgr.dedicatePublicIpRange(dedicatePublicIpRangesCmd); } catch (Exception e) { - Assert.assertTrue(e.getMessage().contains("Public IP address in range is allocated to another account")); + assertTrue(e.getMessage().contains("Public IP address in range is allocated to another account")); } finally { txn.close("runDedicatePublicIpRangeIPAddressAllocated"); } @@ -465,7 +470,7 @@ public class ConfigurationManagerTest { when(configurationMgr._accountVlanMapDao.remove(anyLong())).thenReturn(true); try { Boolean result = configurationMgr.releasePublicIpRange(releasePublicIpRangesCmd); - Assert.assertTrue(result); + assertTrue(result); } catch (Exception e) { logger.info("exception in testing runReleasePublicIpRangePostiveTest1 message: " + e.toString()); } finally { @@ -499,7 +504,7 @@ public class ConfigurationManagerTest { when(configurationMgr._accountVlanMapDao.remove(anyLong())).thenReturn(true); try { Boolean result = configurationMgr.releasePublicIpRange(releasePublicIpRangesCmd); - Assert.assertTrue(result); + assertTrue(result); } catch (Exception e) { logger.info("exception in testing runReleasePublicIpRangePostiveTest2 message: " + e.toString()); } finally { @@ -514,7 +519,7 @@ public class ConfigurationManagerTest { try { 
configurationMgr.releasePublicIpRange(releasePublicIpRangesCmd); } catch (Exception e) { - Assert.assertTrue(e.getMessage().contains("Please specify a valid IP range id")); + assertTrue(e.getMessage().contains("Please specify a valid IP range id")); } finally { txn.close("runReleasePublicIpRangeInvalidIpRange"); } @@ -530,7 +535,7 @@ public class ConfigurationManagerTest { try { configurationMgr.releasePublicIpRange(releasePublicIpRangesCmd); } catch (Exception e) { - Assert.assertTrue(e.getMessage().contains("as it not dedicated to any domain and any account")); + assertTrue(e.getMessage().contains("as it not dedicated to any domain and any account")); } finally { txn.close("runReleaseNonDedicatedPublicIpRange"); } @@ -570,10 +575,10 @@ public class ConfigurationManagerTest { try { configurationMgr.validateSourceNatServiceCapablities(sourceNatServiceCapabilityMap); } catch (InvalidParameterValueException e) { - Assert.assertTrue(e.getMessage(), e.getMessage().contains("Either peraccount or perzone source NAT type can be specified for SupportedSourceNatTypes")); + assertTrue(e.getMessage(), e.getMessage().contains("Either peraccount or perzone source NAT type can be specified for SupportedSourceNatTypes")); caught = true; } - Assert.assertTrue("should not be accepted", caught); + assertTrue("should not be accepted", caught); } @Test @@ -585,10 +590,10 @@ public class ConfigurationManagerTest { try { configurationMgr.validateSourceNatServiceCapablities(sourceNatServiceCapabilityMap); } catch (InvalidParameterValueException e) { - Assert.assertTrue(e.getMessage(), e.getMessage().contains("Unknown specified value for RedundantRouter")); + assertTrue(e.getMessage(), e.getMessage().contains("Unknown specified value for RedundantRouter")); caught = true; } - Assert.assertTrue("should not be accepted", caught); + assertTrue("should not be accepted", caught); } @Test @@ -600,10 +605,10 @@ public class ConfigurationManagerTest { try { 
configurationMgr.validateSourceNatServiceCapablities(sourceNatServiceCapabilityMap); } catch (InvalidParameterValueException e) { - Assert.assertTrue(e.getMessage(), e.getMessage().contains("Only SupportedSourceNatTypes, Network.Capability[name=RedundantRouter] capabilities can be specified for source nat service")); + assertTrue(e.getMessage(), e.getMessage().contains("Only SupportedSourceNatTypes, Network.Capability[name=RedundantRouter] capabilities can be specified for source nat service")); caught = true; } - Assert.assertTrue("should not be accepted", caught); + assertTrue("should not be accepted", caught); } @Test @@ -622,10 +627,10 @@ public class ConfigurationManagerTest { try { configurationMgr.validateStaticNatServiceCapablities(staticNatServiceCapabilityMap); } catch (InvalidParameterValueException e) { - Assert.assertTrue(e.getMessage(), e.getMessage().contains("(frue and talse)")); + assertTrue(e.getMessage(), e.getMessage().contains("(frue and talse)")); caught = true; } - Assert.assertTrue("should not be accepted", caught); + assertTrue("should not be accepted", caught); } @Test @@ -634,7 +639,7 @@ public class ConfigurationManagerTest { Map sourceNatServiceCapabilityMap = new HashMap<>(); sourceNatServiceCapabilityMap.put(Capability.SupportedSourceNatTypes, "peraccount"); sourceNatServiceCapabilityMap.put(Capability.RedundantRouter, "true"); - Assert.assertTrue(configurationMgr.isRedundantRouter(providers, Network.Service.SourceNat, sourceNatServiceCapabilityMap)); + assertTrue(configurationMgr.isRedundantRouter(providers, Network.Service.SourceNat, sourceNatServiceCapabilityMap)); } @Test @@ -642,7 +647,7 @@ public class ConfigurationManagerTest { Map> serviceCapabilityMap = new HashMap<>(); Map sourceNatServiceCapabilityMap = new HashMap<>(); sourceNatServiceCapabilityMap.put(Capability.SupportedSourceNatTypes, "perzone"); - Assert.assertTrue(configurationMgr.isSharedSourceNat(serviceCapabilityMap, sourceNatServiceCapabilityMap)); + 
assertTrue(configurationMgr.isSharedSourceNat(serviceCapabilityMap, sourceNatServiceCapabilityMap)); } @Test @@ -650,7 +655,7 @@ public class ConfigurationManagerTest { Map> serviceCapabilityMap = new HashMap<>(); Map sourceNatServiceCapabilityMap = new HashMap<>(); sourceNatServiceCapabilityMap.put(Capability.SupportedSourceNatTypes, "peraccount"); - Assert.assertFalse(configurationMgr.isSharedSourceNat(serviceCapabilityMap, sourceNatServiceCapabilityMap)); + assertFalse(configurationMgr.isSharedSourceNat(serviceCapabilityMap, sourceNatServiceCapabilityMap)); } @Test @@ -659,7 +664,7 @@ public class ConfigurationManagerTest { sourceNatServiceCapabilityMap.put(Capability.SupportedSourceNatTypes, "peraccount"); sourceNatServiceCapabilityMap.put(Capability.RedundantRouter, "True"); - Assert.assertTrue(configurationMgr.sourceNatCapabilitiesContainValidValues(sourceNatServiceCapabilityMap)); + assertTrue(configurationMgr.sourceNatCapabilitiesContainValidValues(sourceNatServiceCapabilityMap)); } @Test @@ -690,13 +695,13 @@ public class ConfigurationManagerTest { try { configurationMgr.validateStaticNatServiceCapablities(staticNatServiceCapabilityMap); } catch (InvalidParameterValueException e) { - Assert.assertTrue( + assertTrue( e.getMessage(), e.getMessage().contains( "Capability " + Capability.AssociatePublicIP.getName() + " can only be set when capability " + Capability.ElasticIp.getName() + " is true")); caught = true; } - Assert.assertTrue("should not be accepted", caught); + assertTrue("should not be accepted", caught); } @Test @@ -961,27 +966,27 @@ public class ConfigurationManagerTest { //Ipv4 Test boolean result; result = configurationMgr.hasSameSubnet(false, null, null, null, null, null, null, false, null, null, null, null, null); - Assert.assertFalse(result); + assertFalse(result); try { configurationMgr.hasSameSubnet(true, "10.0.0.1", "255.255.255.0", "10.0.0.2", "255.255.255.0", "10.0.0.2", "10.0.0.10", false, null, null, null, null, null); Assert.fail(); 
} catch (InvalidParameterValueException e) { - Assert.assertEquals(e.getMessage(), "The gateway of the subnet should be unique. The subnet already has a gateway 10.0.0.1"); + assertEquals(e.getMessage(), "The gateway of the subnet should be unique. The subnet already has a gateway 10.0.0.1"); } try { configurationMgr.hasSameSubnet(true, "10.0.0.1", "255.255.0.0", "10.0.0.2", "255.255.255.0", "10.0.0.2", "10.0.0.10", false, null, null, null, null, null); Assert.fail(); } catch (InvalidParameterValueException e){ - Assert.assertEquals(e.getMessage(), "The subnet you are trying to add is a subset of the existing subnet having gateway 10.0.0.1 and netmask 255.255.0.0"); + assertEquals(e.getMessage(), "The subnet you are trying to add is a subset of the existing subnet having gateway 10.0.0.1 and netmask 255.255.0.0"); } try { configurationMgr.hasSameSubnet(true, "10.0.0.1", "255.255.255.0", "10.0.0.2", "255.255.0.0", "10.0.0.2", "10.0.0.10", false, null, null, null, null, null); Assert.fail(); } catch (InvalidParameterValueException e) { - Assert.assertEquals(e.getMessage(), "The subnet you are trying to add is a superset of the existing subnet having gateway 10.0.0.1 and netmask 255.255.255.0"); + assertEquals(e.getMessage(), "The subnet you are trying to add is a superset of the existing subnet having gateway 10.0.0.1 and netmask 255.255.255.0"); } result = configurationMgr.hasSameSubnet(true, "10.0.0.1", "255.255.255.0", "10.0.0.1", "255.255.255.0", "10.0.0.2", "10.0.0.10", false, null, null, null, null, null); - Assert.assertTrue(result); + assertTrue(result); //Ipv6 Test Network ipV6Network = mock(Network.class); @@ -992,35 +997,35 @@ public class ConfigurationManagerTest { doThrow(new InvalidParameterValueException("ip6Gateway and ip6Cidr should be defined when startIPv6/endIPv6 are passed in")).when(configurationMgr._networkModel).checkIp6Parameters(Mockito.anyString(), Mockito.anyString(), Mockito.isNull(String.class), Mockito.isNull(String.class)); 
configurationMgr.hasSameSubnet(false, null, null, null, null, null, null, true, "2001:db8:0:f101::1", "2001:db8:0:f101::0/64", "2001:db8:0:f101::2", "2001:db8:0:f101::a", ipV6Network); - Assert.assertTrue(result); + assertTrue(result); try { configurationMgr.hasSameSubnet(false, null, null, null, null, null, null, true, "2001:db8:0:f101::2", "2001:db8:0:f101::0/64", "2001:db8:0:f101::2", "2001:db8:0:f101::a", ipV6Network); Assert.fail(); } catch (InvalidParameterValueException e){ - Assert.assertEquals(e.getMessage(), "The input gateway 2001:db8:0:f101::2 is not same as network gateway 2001:db8:0:f101::1"); + assertEquals(e.getMessage(), "The input gateway 2001:db8:0:f101::2 is not same as network gateway 2001:db8:0:f101::1"); } try { configurationMgr.hasSameSubnet(false, null, null, null, null, null, null, true, "2001:db8:0:f101::1", "2001:db8:0:f101::0/63", "2001:db8:0:f101::2", "2001:db8:0:f101::a", ipV6Network); Assert.fail(); } catch (InvalidParameterValueException e){ - Assert.assertEquals(e.getMessage(), "The input cidr 2001:db8:0:f101::0/63 is not same as network cidr 2001:db8:0:f101::0/64"); + assertEquals(e.getMessage(), "The input cidr 2001:db8:0:f101::0/63 is not same as network cidr 2001:db8:0:f101::0/64"); } try { configurationMgr.hasSameSubnet(false, null, null, null, null, null, null, true, "2001:db8:0:f101::1", "2001:db8:0:f101::0/64", "2001:db9:0:f101::2", "2001:db9:0:f101::a", ipV6Network); Assert.fail(); } catch (InvalidParameterValueException e) { - Assert.assertEquals(e.getMessage(), "Exception from Mock: startIPv6 is not in ip6cidr indicated network!"); + assertEquals(e.getMessage(), "Exception from Mock: startIPv6 is not in ip6cidr indicated network!"); } try { configurationMgr.hasSameSubnet(false, null, null, null, null, null, null, true, "2001:db8:0:f101::1", "2001:db8:0:f101::0/64", "2001:db8:0:f101::a", "2001:db9:0:f101::2", ipV6Network); Assert.fail(); } catch(InvalidParameterValueException e) { - Assert.assertEquals(e.getMessage(), 
"Exception from Mock: endIPv6 is not in ip6cidr indicated network!"); + assertEquals(e.getMessage(), "Exception from Mock: endIPv6 is not in ip6cidr indicated network!"); } result = configurationMgr.hasSameSubnet(false, null, null, null, null, null, null, true, null, null, "2001:db8:0:f101::2", "2001:db8:0:f101::a", ipV6Network); - Assert.assertTrue(result); + assertTrue(result); } @Test(expected = CloudRuntimeException.class) @@ -1035,12 +1040,12 @@ public class ConfigurationManagerTest { @Test public void testGetVlanNumberFromUriVlan() { - Assert.assertEquals("7", configurationMgr.getVlanNumberFromUri("vlan://7")); + assertEquals("7", configurationMgr.getVlanNumberFromUri("vlan://7")); } @Test public void testGetVlanNumberFromUriUntagged() { - Assert.assertEquals("untagged", configurationMgr.getVlanNumberFromUri("vlan://untagged")); + assertEquals("untagged", configurationMgr.getVlanNumberFromUri("vlan://untagged")); } @Test @@ -1080,48 +1085,48 @@ public class ConfigurationManagerTest { @Test public void shouldUpdateDiskOfferingTests(){ - Assert.assertTrue(configurationMgr.shouldUpdateDiskOffering(Mockito.anyString(), Mockito.anyString(), Mockito.anyInt(), Mockito.anyBoolean(), Mockito.anyString(), Mockito.anyString(), Mockito.any(DiskOffering.State.class))); - Assert.assertTrue(configurationMgr.shouldUpdateDiskOffering(Mockito.anyString(), nullable(String.class), nullable(Integer.class), nullable(Boolean.class), nullable(String.class), nullable(String.class), nullable(DiskOffering.State.class))); - Assert.assertTrue(configurationMgr.shouldUpdateDiskOffering(nullable(String.class), nullable(String.class), nullable(Integer.class), nullable(Boolean.class), nullable(String.class), nullable(String.class), Mockito.any(DiskOffering.State.class))); - Assert.assertTrue(configurationMgr.shouldUpdateDiskOffering(nullable(String.class), Mockito.anyString(), nullable(Integer.class), nullable(Boolean.class), nullable(String.class), nullable(String.class), 
nullable(DiskOffering.State.class))); - Assert.assertTrue(configurationMgr.shouldUpdateDiskOffering(nullable(String.class), nullable(String.class), Mockito.anyInt(), nullable(Boolean.class), nullable(String.class), nullable(String.class), nullable(DiskOffering.State.class))); - Assert.assertTrue(configurationMgr.shouldUpdateDiskOffering(nullable(String.class), nullable(String.class), nullable(int.class), Mockito.anyBoolean(), nullable(String.class), nullable(String.class), nullable(DiskOffering.State.class))); - Assert.assertTrue(configurationMgr.shouldUpdateDiskOffering(nullable(String.class), nullable(String.class), nullable(int.class), nullable(Boolean.class), Mockito.anyString(), Mockito.anyString(), nullable(DiskOffering.State.class))); + assertTrue(configurationMgr.shouldUpdateDiskOffering(Mockito.anyString(), Mockito.anyString(), Mockito.anyInt(), Mockito.anyBoolean(), Mockito.anyString(), Mockito.anyString(), Mockito.any(DiskOffering.State.class))); + assertTrue(configurationMgr.shouldUpdateDiskOffering(Mockito.anyString(), nullable(String.class), nullable(Integer.class), nullable(Boolean.class), nullable(String.class), nullable(String.class), nullable(DiskOffering.State.class))); + assertTrue(configurationMgr.shouldUpdateDiskOffering(nullable(String.class), nullable(String.class), nullable(Integer.class), nullable(Boolean.class), nullable(String.class), nullable(String.class), Mockito.any(DiskOffering.State.class))); + assertTrue(configurationMgr.shouldUpdateDiskOffering(nullable(String.class), Mockito.anyString(), nullable(Integer.class), nullable(Boolean.class), nullable(String.class), nullable(String.class), nullable(DiskOffering.State.class))); + assertTrue(configurationMgr.shouldUpdateDiskOffering(nullable(String.class), nullable(String.class), Mockito.anyInt(), nullable(Boolean.class), nullable(String.class), nullable(String.class), nullable(DiskOffering.State.class))); + assertTrue(configurationMgr.shouldUpdateDiskOffering(nullable(String.class), 
nullable(String.class), nullable(int.class), Mockito.anyBoolean(), nullable(String.class), nullable(String.class), nullable(DiskOffering.State.class))); + assertTrue(configurationMgr.shouldUpdateDiskOffering(nullable(String.class), nullable(String.class), nullable(int.class), nullable(Boolean.class), Mockito.anyString(), Mockito.anyString(), nullable(DiskOffering.State.class))); } @Test public void shouldUpdateDiskOfferingTestFalse(){ - Assert.assertFalse(configurationMgr.shouldUpdateDiskOffering(null, null, null, null, null, null, null)); + assertFalse(configurationMgr.shouldUpdateDiskOffering(null, null, null, null, null, null, null)); } @Test public void shouldUpdateIopsRateParametersTestFalse() { - Assert.assertFalse(configurationMgr.shouldUpdateIopsRateParameters(null, null, null, null, null, null)); + assertFalse(configurationMgr.shouldUpdateIopsRateParameters(null, null, null, null, null, null)); } @Test public void shouldUpdateIopsRateParametersTests(){ - Assert.assertTrue(configurationMgr.shouldUpdateIopsRateParameters(Mockito.anyLong(), Mockito.anyLong(), Mockito.anyLong(), Mockito.anyLong(), Mockito.anyLong(), Mockito.anyLong())); - Assert.assertTrue(configurationMgr.shouldUpdateIopsRateParameters(nullable(Long.class), Mockito.anyLong(), nullable(Long.class), nullable(Long.class), nullable(Long.class), nullable(Long.class))); - Assert.assertTrue(configurationMgr.shouldUpdateIopsRateParameters(nullable(Long.class), nullable(Long.class), Mockito.anyLong(), nullable(Long.class), nullable(Long.class), nullable(Long.class))); - Assert.assertTrue(configurationMgr.shouldUpdateIopsRateParameters(nullable(Long.class), nullable(Long.class), nullable(Long.class), Mockito.anyLong(), nullable(Long.class), nullable(Long.class))); - Assert.assertTrue(configurationMgr.shouldUpdateIopsRateParameters(nullable(Long.class), nullable(Long.class), nullable(Long.class), nullable(Long.class), Mockito.anyLong(), nullable(Long.class))); - 
Assert.assertTrue(configurationMgr.shouldUpdateIopsRateParameters(nullable(Long.class), nullable(Long.class), nullable(Long.class), nullable(Long.class), nullable(Long.class), Mockito.anyLong())); + assertTrue(configurationMgr.shouldUpdateIopsRateParameters(Mockito.anyLong(), Mockito.anyLong(), Mockito.anyLong(), Mockito.anyLong(), Mockito.anyLong(), Mockito.anyLong())); + assertTrue(configurationMgr.shouldUpdateIopsRateParameters(nullable(Long.class), Mockito.anyLong(), nullable(Long.class), nullable(Long.class), nullable(Long.class), nullable(Long.class))); + assertTrue(configurationMgr.shouldUpdateIopsRateParameters(nullable(Long.class), nullable(Long.class), Mockito.anyLong(), nullable(Long.class), nullable(Long.class), nullable(Long.class))); + assertTrue(configurationMgr.shouldUpdateIopsRateParameters(nullable(Long.class), nullable(Long.class), nullable(Long.class), Mockito.anyLong(), nullable(Long.class), nullable(Long.class))); + assertTrue(configurationMgr.shouldUpdateIopsRateParameters(nullable(Long.class), nullable(Long.class), nullable(Long.class), nullable(Long.class), Mockito.anyLong(), nullable(Long.class))); + assertTrue(configurationMgr.shouldUpdateIopsRateParameters(nullable(Long.class), nullable(Long.class), nullable(Long.class), nullable(Long.class), nullable(Long.class), Mockito.anyLong())); } @Test public void shouldUpdateBytesRateParametersTestFalse() { - Assert.assertFalse(configurationMgr.shouldUpdateBytesRateParameters(null, null, null, null, null, null)); + assertFalse(configurationMgr.shouldUpdateBytesRateParameters(null, null, null, null, null, null)); } @Test public void shouldUpdateBytesRateParametersTests(){ - Assert.assertTrue(configurationMgr.shouldUpdateBytesRateParameters(Mockito.anyLong(), Mockito.anyLong(), Mockito.anyLong(), Mockito.anyLong(), Mockito.anyLong(), Mockito.anyLong())); - Assert.assertTrue(configurationMgr.shouldUpdateBytesRateParameters(nullable(Long.class), Mockito.anyLong(), nullable(Long.class), 
nullable(Long.class), nullable(Long.class), nullable(Long.class))); - Assert.assertTrue(configurationMgr.shouldUpdateBytesRateParameters(nullable(Long.class), nullable(Long.class), Mockito.anyLong(), nullable(Long.class), nullable(Long.class), nullable(Long.class))); - Assert.assertTrue(configurationMgr.shouldUpdateBytesRateParameters(nullable(Long.class), nullable(Long.class), nullable(Long.class), Mockito.anyLong(), nullable(Long.class), nullable(Long.class))); - Assert.assertTrue(configurationMgr.shouldUpdateBytesRateParameters(nullable(Long.class), nullable(Long.class), nullable(Long.class), nullable(Long.class), Mockito.anyLong(), nullable(Long.class))); - Assert.assertTrue(configurationMgr.shouldUpdateBytesRateParameters(nullable(Long.class), nullable(Long.class), nullable(Long.class), nullable(Long.class), nullable(Long.class), Mockito.anyLong())); + assertTrue(configurationMgr.shouldUpdateBytesRateParameters(Mockito.anyLong(), Mockito.anyLong(), Mockito.anyLong(), Mockito.anyLong(), Mockito.anyLong(), Mockito.anyLong())); + assertTrue(configurationMgr.shouldUpdateBytesRateParameters(nullable(Long.class), Mockito.anyLong(), nullable(Long.class), nullable(Long.class), nullable(Long.class), nullable(Long.class))); + assertTrue(configurationMgr.shouldUpdateBytesRateParameters(nullable(Long.class), nullable(Long.class), Mockito.anyLong(), nullable(Long.class), nullable(Long.class), nullable(Long.class))); + assertTrue(configurationMgr.shouldUpdateBytesRateParameters(nullable(Long.class), nullable(Long.class), nullable(Long.class), Mockito.anyLong(), nullable(Long.class), nullable(Long.class))); + assertTrue(configurationMgr.shouldUpdateBytesRateParameters(nullable(Long.class), nullable(Long.class), nullable(Long.class), nullable(Long.class), Mockito.anyLong(), nullable(Long.class))); + assertTrue(configurationMgr.shouldUpdateBytesRateParameters(nullable(Long.class), nullable(Long.class), nullable(Long.class), nullable(Long.class), nullable(Long.class), 
Mockito.anyLong())); } @Test @@ -1235,10 +1240,10 @@ public class ConfigurationManagerTest { return prefixVO; }); configurationMgr.createDataCenterGuestIpv6Prefix(cmd); - Assert.assertEquals(1, persistedPrefix.size()); + assertEquals(1, persistedPrefix.size()); DataCenterGuestIpv6PrefixVO prefixVO = persistedPrefix.get(0); - Assert.assertEquals(zoneId, prefixVO.getDataCenterId()); - Assert.assertEquals(prefix, prefixVO.getPrefix()); + assertEquals(zoneId, prefixVO.getDataCenterId()); + assertEquals(prefix, prefixVO.getPrefix()); } @Test @@ -1255,17 +1260,17 @@ public class ConfigurationManagerTest { Mockito.mock(DataCenterGuestIpv6PrefixVO.class), Mockito.mock(DataCenterGuestIpv6PrefixVO.class))); List prefixes = configurationMgr.listDataCenterGuestIpv6Prefixes(cmd); - Assert.assertEquals(1, prefixes.size()); + assertEquals(1, prefixes.size()); ListGuestNetworkIpv6PrefixesCmd cmd1 = Mockito.mock(ListGuestNetworkIpv6PrefixesCmd.class); Mockito.when(cmd1.getId()).thenReturn(null); Mockito.when(cmd1.getZoneId()).thenReturn(1L); prefixes = configurationMgr.listDataCenterGuestIpv6Prefixes(cmd1); - Assert.assertEquals(2, prefixes.size()); + assertEquals(2, prefixes.size()); ListGuestNetworkIpv6PrefixesCmd cmd2 = Mockito.mock(ListGuestNetworkIpv6PrefixesCmd.class); Mockito.when(cmd2.getId()).thenReturn(null); Mockito.when(cmd2.getZoneId()).thenReturn(null); prefixes = configurationMgr.listDataCenterGuestIpv6Prefixes(cmd2); - Assert.assertEquals(3, prefixes.size()); + assertEquals(3, prefixes.size()); } @Test(expected = InvalidParameterValueException.class) @@ -1304,8 +1309,8 @@ public class ConfigurationManagerTest { return true; }); configurationMgr.deleteDataCenterGuestIpv6Prefix(cmd); - Assert.assertEquals(1, removedPrefix.size()); - Assert.assertEquals(prefixId, removedPrefix.get(0)); + assertEquals(1, removedPrefix.size()); + assertEquals(prefixId, removedPrefix.get(0)); } @Test(expected = InvalidParameterValueException.class) @@ -1355,8 +1360,8 @@ public class 
ConfigurationManagerTest { mockPersistDatacenterForCreateZone(); DataCenter zone = configurationMgr.createZone(cmd); Assert.assertNotNull(zone); - Assert.assertEquals(NetworkType.Advanced, zone.getNetworkType()); - Assert.assertEquals(DataCenter.Type.Edge, zone.getType()); + assertEquals(NetworkType.Advanced, zone.getNetworkType()); + assertEquals(DataCenter.Type.Edge, zone.getType()); } @Test @@ -1368,8 +1373,8 @@ public class ConfigurationManagerTest { mockPersistDatacenterForCreateZone(); DataCenter zone = configurationMgr.createZone(cmd); Assert.assertNotNull(zone); - Assert.assertEquals(NetworkType.Advanced, zone.getNetworkType()); - Assert.assertEquals(DataCenter.Type.Core, zone.getType()); + assertEquals(NetworkType.Advanced, zone.getNetworkType()); + assertEquals(DataCenter.Type.Core, zone.getType()); } @Test @@ -1381,7 +1386,7 @@ public class ConfigurationManagerTest { mockPersistDatacenterForCreateZone(); DataCenter zone = configurationMgr.createZone(cmd); Assert.assertNotNull(zone); - Assert.assertEquals(NetworkType.Basic, zone.getNetworkType()); + assertEquals(NetworkType.Basic, zone.getNetworkType()); } @Test(expected = InvalidParameterValueException.class) @@ -1428,4 +1433,77 @@ public class ConfigurationManagerTest { Mockito.doNothing().when(messageBus).publish(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any()); configurationMgr.createPod(zoneId, "TestPod", null, null, null, null, null); } + + @Test + public void testValidateSpecificConfigurationValues_ValidFormatWithPositiveIntegers() { + String name = AgentManager.GranularWaitTimeForCommands.toString(); + String validValue = "CopyCommand=120, DeleteCommand= 60"; + + try { + configurationMgr.validateSpecificConfigurationValues(name, validValue, String.class); + } catch (InvalidParameterValueException e) { + Assert.fail("Exception should not be thrown for a valid command string with positive integers, but there is an error " + e); + } + } + + @Test + public void 
testValidateSpecificConfigurationValues_InvalidFormat() { + String name = AgentManager.GranularWaitTimeForCommands.toString(); + String invalidValue = "{\"CopyCommand\": 120}"; + + try { + configurationMgr.validateSpecificConfigurationValues(name, invalidValue, String.class); + Assert.fail("Exception should be thrown for an invalid command string."); + } catch (InvalidParameterValueException e) { + assertTrue(e.getMessage().contains("does not contain '='.")); + } + } + + @Test + public void testValidCommandString() { + final String input = "DhcpEntryCommand=600, SavePasswordCommand=300, VmDataCommand=300"; + final Pair result = configurationMgr.validateCommaSeparatedKeyValueConfigWithPositiveIntegerValues(input); + assertTrue("Expected validation to pass", result.first()); + assertEquals("Expected no error message", "", result.second()); + } + + @Test + public void testInvalidCommandValue() { + final String input = "DhcpEntryCommand=600, SavePasswordCommand=300, VmDataCommand=invalid"; + final Pair result = configurationMgr.validateCommaSeparatedKeyValueConfigWithPositiveIntegerValues(input); + assertFalse("Expected validation to fail", result.first()); + assertEquals("Expected specific error message", + "Validation failed: The value for command 'VmDataCommand' is not a valid integer. Invalid value: invalid", + result.second()); + } + + @Test + public void testCommandWithZeroValue() { + final String input = "DhcpEntryCommand=600, SavePasswordCommand=0, VmDataCommand=300"; + final Pair result = configurationMgr.validateCommaSeparatedKeyValueConfigWithPositiveIntegerValues(input); + assertFalse("Expected validation to fail", result.first()); + assertEquals("Expected specific error message", + "Validation failed: The value for command 'SavePasswordCommand' is not greater than 0. 
Invalid value: 0", + result.second()); + } + + @Test + public void testCommandWithNegativeValue() { + final String input = "DhcpEntryCommand=-100, SavePasswordCommand=300, VmDataCommand=300"; + final Pair result = configurationMgr.validateCommaSeparatedKeyValueConfigWithPositiveIntegerValues(input); + assertFalse("Expected validation to fail", result.first()); + assertEquals("Expected specific error message", + "Validation failed: The value for command 'DhcpEntryCommand' is not greater than 0. Invalid value: -100", + result.second()); + } + + @Test + public void testInvalidCommandStructure() { + final String input = "DhcpEntryCommand600, SavePasswordCommand=300, VmDataCommand=300"; + final Pair result = configurationMgr.validateCommaSeparatedKeyValueConfigWithPositiveIntegerValues(input); + assertFalse("Expected validation to fail", result.first()); + assertEquals("Expected specific error message", + "Validation failed: Command 'DhcpEntryCommand600' does not contain '='.", + result.second()); + } } diff --git a/test/integration/smoke/test_global_settings.py b/test/integration/smoke/test_global_settings.py index 2018384ab3a..53f55736d4f 100644 --- a/test/integration/smoke/test_global_settings.py +++ b/test/integration/smoke/test_global_settings.py @@ -76,6 +76,12 @@ class TestUpdateConfigWithScope(cloudstackTestCase): updateConfigurationCmd.scopeid = 1 self.apiClient.updateConfiguration(updateConfigurationCmd) + + updateConfigurationCmd = updateConfiguration.updateConfigurationCmd() + updateConfigurationCmd.name = "commands.timeout" + updateConfigurationCmd.value = "" + self.apiClient.updateConfiguration(updateConfigurationCmd) + class TestListConfigurations(cloudstackTestCase): """ Test to list configurations (global settings) @@ -181,3 +187,46 @@ class TestListConfigurations(cloudstackTestCase): subgroup=subgroup) self.assertNotEqual(len(listConfigurationsResponse), 0, "Check if the list configurations API returns a non-empty response") self.debug("Total %d 
configurations for group %s, subgroup %s" % (len(listConfigurationsResponse), group, subgroup)) + + @attr(tags=["devcloud", "basic", "advanced"], required_hardware="false") + def test_UpdateCommandsTimeoutConfigParamWithValidValue(self): + """ + test update configuration setting for commands.timeout with valid value + @return: + """ + updateConfigurationCmd = updateConfiguration.updateConfigurationCmd() + updateConfigurationCmd.name = "commands.timeout" + updateConfigurationCmd.value = "DhcpEntryCommand= 600, SavePasswordCommand= 300, VmDataCommand= 300" + + updateConfigurationResponse = self.apiClient.updateConfiguration(updateConfigurationCmd) + self.debug("updated the parameter %s with value %s" % (updateConfigurationResponse.name, updateConfigurationResponse.value)) + + listConfigurationsCmd = listConfigurations.listConfigurationsCmd() + listConfigurationsCmd.name = updateConfigurationResponse.name + listConfigurationsResponse = self.apiClient.listConfigurations(listConfigurationsCmd) + + for item in listConfigurationsResponse: + if item.name == updateConfigurationResponse.name: + configParam = item + + self.assertEqual(configParam.value, updateConfigurationResponse.value, "Check if the update API returned is the same as the one we got in the list API") + + + @attr(tags=["devcloud", "basic", "advanced"], required_hardware="false") + def test_UpdateCommandsTimeoutConfigParamWithInvalidValue(self): + """ + Test update configuration setting for commands.timeout with invalid valid value + @return: + """ + updateConfigurationCmd = updateConfiguration.updateConfigurationCmd() + updateConfigurationCmd.name = "commands.timeout" + updateConfigurationCmd.value = "StartCommand: 1" # Intentionally providing invalid format + + try: + self.apiClient.updateConfiguration(updateConfigurationCmd) + self.fail("API call should have failed due to invalid format, but it succeeded.") + except Exception as e: + self.debug("Caught expected exception: %s" % str(e)) + error_message = 
str(e) + self.assertIn("errorCode: 431", error_message, "Expected error code 431 for invalid format") + self.assertIn("Validation failed", error_message, "Expected validation failure message") From bd488c4bba019435515e48075d4437fef5e01aec Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Tue, 7 Jan 2025 17:17:12 +0530 Subject: [PATCH 3/5] server, plugin: enhance storage stats for IOPS (#10034) Adds framework layer change to allow retrieving and storing IOPS stats for storage pools. Custom `PrimaryStoreDriver` can implement method - `getStorageIopsStats` for returning IOPS stats. Existing method `getUsedIops` can also be overridden by such plugins when only used IOPS is returned. For testing purpose, implementation has been added for simulator hypervisor plugin to return capacity and used IOPS for a pool. For local storage pool, implementation has been added using iostat to return currently used IOPS. StoragePoolResponse class has been updated to return IOPS values which allows showing IOPS values in UI for different storage pool related views and APIs. 
Signed-off-by: Abhishek Kumar --- .../java/com/cloud/storage/StorageStats.java | 3 + .../apache/cloudstack/api/ApiConstants.java | 1 + .../api/response/StoragePoolResponse.java | 12 ++ .../agent/api/GetStorageStatsAnswer.java | 36 ++++- .../agent/api/GetStorageStatsAnswerTest.java | 81 ++++++++++ debian/control | 2 +- .../api/storage/PrimaryDataStoreDriver.java | 8 + .../upgrade/dao/Upgrade41700to41710.java | 68 ++++++-- .../storage/datastore/db/StoragePoolVO.java | 11 ++ .../META-INF/db/schema-42000to42010.sql | 3 + .../db/views/cloud.storage_pool_view.sql | 2 + .../upgrade/dao/Upgrade41700to41710Test.java | 123 +++++++++++++++ packaging/el8/cloud.spec | 1 + .../LibvirtGetStorageStatsCommandWrapper.java | 3 +- .../kvm/storage/KVMStoragePool.java | 8 + .../kvm/storage/LibvirtStorageAdaptor.java | 69 ++++++-- .../kvm/storage/LibvirtStoragePool.java | 28 +++- .../storage/LibvirtStorageAdaptorTest.java | 94 ++++++++++- .../agent/manager/MockStorageManagerImpl.java | 8 +- .../cloud/simulator/dao/MockVolumeDao.java | 2 + .../simulator/dao/MockVolumeDaoImpl.java | 12 ++ .../api/query/dao/StoragePoolJoinDaoImpl.java | 9 +- .../cloud/api/query/vo/StoragePoolJoinVO.java | 14 ++ .../java/com/cloud/server/StatsCollector.java | 28 +++- .../com/cloud/storage/StorageManagerImpl.java | 23 ++- .../com/cloud/server/StatsCollectorTest.java | 148 ++++++++++++++---- .../cloud/storage/StorageManagerImplTest.java | 96 +++++++++--- ui/public/locales/en.json | 1 + .../config/section/infra/primaryStorages.js | 2 +- 29 files changed, 788 insertions(+), 108 deletions(-) create mode 100644 core/src/test/java/com/cloud/agent/api/GetStorageStatsAnswerTest.java create mode 100644 engine/schema/src/test/java/com/cloud/upgrade/dao/Upgrade41700to41710Test.java diff --git a/api/src/main/java/com/cloud/storage/StorageStats.java b/api/src/main/java/com/cloud/storage/StorageStats.java index a474b23489c..502e2aaae40 100644 --- a/api/src/main/java/com/cloud/storage/StorageStats.java +++ 
b/api/src/main/java/com/cloud/storage/StorageStats.java @@ -26,4 +26,7 @@ public interface StorageStats { * @return bytes capacity of the storage server */ public long getCapacityBytes(); + + Long getCapacityIops(); + Long getUsedIops(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index bf8b79b29d0..cf03f1d2699 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -509,6 +509,7 @@ public class ApiConstants { public static final String URL = "url"; public static final String USAGE_INTERFACE = "usageinterface"; public static final String USED_SUBNETS = "usedsubnets"; + public static final String USED_IOPS = "usediops"; public static final String USER_DATA = "userdata"; public static final String USER_DATA_NAME = "userdataname"; diff --git a/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java index 06d5103d731..676803ea86b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java @@ -97,6 +97,10 @@ public class StoragePoolResponse extends BaseResponseWithAnnotations { @Param(description = "total min IOPS currently in use by volumes") private Long allocatedIops; + @SerializedName(ApiConstants.USED_IOPS) + @Param(description = "total IOPS currently in use", since = "4.20.1") + private Long usedIops; + @SerializedName(ApiConstants.STORAGE_CUSTOM_STATS) @Param(description = "the storage pool custom stats", since = "4.18.1") private Map customStats; @@ -312,6 +316,14 @@ public class StoragePoolResponse extends BaseResponseWithAnnotations { this.allocatedIops = allocatedIops; } + public Long getUsedIops() { + return usedIops; + } + + public void setUsedIops(Long 
usedIops) { + this.usedIops = usedIops; + } + public Map getCustomStats() { return customStats; } diff --git a/core/src/main/java/com/cloud/agent/api/GetStorageStatsAnswer.java b/core/src/main/java/com/cloud/agent/api/GetStorageStatsAnswer.java index 26e7b749586..79753661066 100644 --- a/core/src/main/java/com/cloud/agent/api/GetStorageStatsAnswer.java +++ b/core/src/main/java/com/cloud/agent/api/GetStorageStatsAnswer.java @@ -27,24 +27,46 @@ public class GetStorageStatsAnswer extends Answer implements StorageStats { protected GetStorageStatsAnswer() { } - protected long used; + protected long usedBytes; - protected long capacity; + protected long capacityBytes; + + protected Long capacityIops; + + protected Long usedIops; @Override public long getByteUsed() { - return used; + return usedBytes; } @Override public long getCapacityBytes() { - return capacity; + return capacityBytes; } - public GetStorageStatsAnswer(GetStorageStatsCommand cmd, long capacity, long used) { + @Override + public Long getCapacityIops() { + return capacityIops; + } + + @Override + public Long getUsedIops() { + return usedIops; + } + + public GetStorageStatsAnswer(GetStorageStatsCommand cmd, long capacityBytes, long usedBytes) { super(cmd, true, null); - this.capacity = capacity; - this.used = used; + this.capacityBytes = capacityBytes; + this.usedBytes = usedBytes; + } + + public GetStorageStatsAnswer(GetStorageStatsCommand cmd, long capacityBytes, long usedBytes, Long capacityIops, Long usedIops) { + super(cmd, true, null); + this.capacityBytes = capacityBytes; + this.usedBytes = usedBytes; + this.capacityIops = capacityIops; + this.usedIops = usedIops; } public GetStorageStatsAnswer(GetStorageStatsCommand cmd, String details) { diff --git a/core/src/test/java/com/cloud/agent/api/GetStorageStatsAnswerTest.java b/core/src/test/java/com/cloud/agent/api/GetStorageStatsAnswerTest.java new file mode 100644 index 00000000000..44af83ada2d --- /dev/null +++ 
b/core/src/test/java/com/cloud/agent/api/GetStorageStatsAnswerTest.java @@ -0,0 +1,81 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.agent.api; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class GetStorageStatsAnswerTest { + + @Test + public void testDefaultConstructor() { + GetStorageStatsAnswer answer = new GetStorageStatsAnswer(); + + Assert.assertEquals(0, answer.getByteUsed()); + Assert.assertEquals(0, answer.getCapacityBytes()); + Assert.assertNull(answer.getCapacityIops()); + Assert.assertNull(answer.getUsedIops()); + } + + @Test + public void testConstructorWithCapacityAndUsedBytes() { + GetStorageStatsCommand mockCmd = new GetStorageStatsCommand(); + long capacityBytes = 1024L; + long usedBytes = 512L; + + GetStorageStatsAnswer answer = new GetStorageStatsAnswer(mockCmd, capacityBytes, usedBytes); + + Assert.assertEquals(capacityBytes, answer.getCapacityBytes()); + Assert.assertEquals(usedBytes, answer.getByteUsed()); + Assert.assertNull(answer.getCapacityIops()); + Assert.assertNull(answer.getUsedIops()); + } + + @Test + public void 
testConstructorWithIops() { + GetStorageStatsCommand mockCmd = new GetStorageStatsCommand(); + long capacityBytes = 2048L; + long usedBytes = 1024L; + Long capacityIops = 1000L; + Long usedIops = 500L; + + GetStorageStatsAnswer answer = new GetStorageStatsAnswer(mockCmd, capacityBytes, usedBytes, capacityIops, usedIops); + + Assert.assertEquals(capacityBytes, answer.getCapacityBytes()); + Assert.assertEquals(usedBytes, answer.getByteUsed()); + Assert.assertEquals(capacityIops, answer.getCapacityIops()); + Assert.assertEquals(usedIops, answer.getUsedIops()); + } + + @Test + public void testErrorConstructor() { + GetStorageStatsCommand mockCmd = new GetStorageStatsCommand(); + String errorDetails = "An error occurred"; + + GetStorageStatsAnswer answer = new GetStorageStatsAnswer(mockCmd, errorDetails); + + Assert.assertFalse(answer.getResult()); + Assert.assertEquals(errorDetails, answer.getDetails()); + Assert.assertEquals(0, answer.getCapacityBytes()); + Assert.assertEquals(0, answer.getByteUsed()); + Assert.assertNull(answer.getCapacityIops()); + Assert.assertNull(answer.getUsedIops()); + } +} diff --git a/debian/control b/debian/control index c0cb95af035..a773844c27c 100644 --- a/debian/control +++ b/debian/control @@ -24,7 +24,7 @@ Description: CloudStack server library Package: cloudstack-agent Architecture: all -Depends: ${python:Depends}, ${python3:Depends}, openjdk-17-jre-headless | java17-runtime-headless | java17-runtime | zulu-17, cloudstack-common (= ${source:Version}), lsb-base (>= 9), openssh-client, qemu-kvm (>= 2.5) | qemu-system-x86 (>= 5.2), libvirt-bin (>= 1.3) | libvirt-daemon-system (>= 3.0), iproute2, ebtables, vlan, ipset, python3-libvirt, ethtool, iptables, cryptsetup, rng-tools, rsync, lsb-release, ufw, apparmor, cpu-checker, libvirt-daemon-driver-storage-rbd +Depends: ${python:Depends}, ${python3:Depends}, openjdk-17-jre-headless | java17-runtime-headless | java17-runtime | zulu-17, cloudstack-common (= ${source:Version}), lsb-base (>= 9), 
openssh-client, qemu-kvm (>= 2.5) | qemu-system-x86 (>= 5.2), libvirt-bin (>= 1.3) | libvirt-daemon-system (>= 3.0), iproute2, ebtables, vlan, ipset, python3-libvirt, ethtool, iptables, cryptsetup, rng-tools, rsync, lsb-release, ufw, apparmor, cpu-checker, libvirt-daemon-driver-storage-rbd, sysstat Recommends: init-system-helpers Conflicts: cloud-agent, cloud-agent-libs, cloud-agent-deps, cloud-agent-scripts Description: CloudStack agent diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java index 2011b1f08fb..c8d9015af90 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java @@ -111,6 +111,14 @@ public interface PrimaryDataStoreDriver extends DataStoreDriver { */ Pair getStorageStats(StoragePool storagePool); + /** + * Intended for managed storage + * returns the capacity and used IOPS or null if not supported + */ + default Pair getStorageIopsStats(StoragePool storagePool) { + return null; + } + /** * intended for managed storage * returns true if the storage can provide the volume stats (physical and virtual size) diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41700to41710.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41700to41710.java index e3eb2bf514d..266401e0c31 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41700to41710.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41700to41710.java @@ -23,12 +23,16 @@ import java.util.List; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDaoImpl; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import 
org.apache.commons.collections.CollectionUtils; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.dao.VolumeDaoImpl; import com.cloud.upgrade.SystemVmTemplateRegistration; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.exception.CloudRuntimeException; public class Upgrade41700to41710 extends DbUpgradeAbstractImpl implements DbUpgradeSystemVmTemplate { @@ -95,24 +99,58 @@ public class Upgrade41700to41710 extends DbUpgradeAbstractImpl implements DbUpgr } } - private void updateStorPoolStorageType() { - storageDao = new PrimaryDataStoreDaoImpl(); - List storPoolPools = storageDao.findPoolsByProvider("StorPool"); - for (StoragePoolVO storagePoolVO : storPoolPools) { - if (StoragePoolType.SharedMountPoint == storagePoolVO.getPoolType()) { - storagePoolVO.setPoolType(StoragePoolType.StorPool); - storageDao.update(storagePoolVO.getId(), storagePoolVO); - } - updateStorageTypeForStorPoolVolumes(storagePoolVO.getId()); + protected PrimaryDataStoreDao getStorageDao() { + if (storageDao == null) { + storageDao = new PrimaryDataStoreDaoImpl(); } + return storageDao; } - private void updateStorageTypeForStorPoolVolumes(long storagePoolId) { - volumeDao = new VolumeDaoImpl(); - List volumes = volumeDao.findByPoolId(storagePoolId, null); - for (VolumeVO volumeVO : volumes) { - volumeVO.setPoolType(StoragePoolType.StorPool); - volumeDao.update(volumeVO.getId(), volumeVO); + protected VolumeDao getVolumeDao() { + if (volumeDao == null) { + volumeDao = new VolumeDaoImpl(); } + return volumeDao; + } + + /* + GenericDao.customSearch using GenericSearchBuilder and GenericDao.update using + GenericDao.createSearchBuilder used here to prevent any future issues when new fields + are added to StoragePoolVO or VolumeVO and this upgrade path starts to fail. 
+ */ + protected void updateStorPoolStorageType() { + StoragePoolVO pool = getStorageDao().createForUpdate(); + pool.setPoolType(StoragePoolType.StorPool); + SearchBuilder sb = getStorageDao().createSearchBuilder(); + sb.and("provider", sb.entity().getStorageProviderName(), SearchCriteria.Op.EQ); + sb.and("type", sb.entity().getPoolType(), SearchCriteria.Op.EQ); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("provider", StoragePoolType.StorPool.name()); + sc.setParameters("type", StoragePoolType.SharedMountPoint.name()); + getStorageDao().update(pool, sc); + + GenericSearchBuilder gSb = getStorageDao().createSearchBuilder(Long.class); + gSb.selectFields(gSb.entity().getId()); + gSb.and("provider", gSb.entity().getStorageProviderName(), SearchCriteria.Op.EQ); + gSb.done(); + SearchCriteria gSc = gSb.create(); + gSc.setParameters("provider", StoragePoolType.StorPool.name()); + List poolIds = getStorageDao().customSearch(gSc, null); + updateStorageTypeForStorPoolVolumes(poolIds); + } + + protected void updateStorageTypeForStorPoolVolumes(List storagePoolIds) { + if (CollectionUtils.isEmpty(storagePoolIds)) { + return; + } + VolumeVO volume = getVolumeDao().createForUpdate(); + volume.setPoolType(StoragePoolType.StorPool); + SearchBuilder sb = getVolumeDao().createSearchBuilder(); + sb.and("poolId", sb.entity().getPoolId(), SearchCriteria.Op.IN); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("poolId", storagePoolIds.toArray()); + getVolumeDao().update(volume, sc); } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java index 92a444bd83f..c2f5d0a5d96 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java @@ -119,6 +119,9 @@ public class 
StoragePoolVO implements StoragePool { @Column(name = "capacity_iops", updatable = true, nullable = true) private Long capacityIops; + @Column(name = "used_iops", updatable = true, nullable = true) + private Long usedIops; + @Column(name = "hypervisor") @Convert(converter = HypervisorTypeConverter.class) private HypervisorType hypervisor; @@ -256,6 +259,14 @@ public class StoragePoolVO implements StoragePool { return capacityIops; } + public Long getUsedIops() { + return usedIops; + } + + public void setUsedIops(Long usedIops) { + this.usedIops = usedIops; + } + @Override public Long getClusterId() { return clusterId; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42000to42010.sql b/engine/schema/src/main/resources/META-INF/db/schema-42000to42010.sql index aef99dd0c7f..8b70cce3404 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-42000to42010.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-42000to42010.sql @@ -32,3 +32,6 @@ CALL `cloud`.`IDEMPOTENT_ADD_FOREIGN_KEY`('cloud.mshost_peer', 'fk_mshost_peer__ -- Add last_id to the volumes table CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.volumes', 'last_id', 'bigint(20) unsigned DEFAULT NULL'); + +-- Add used_iops column to support IOPS data in storage stats +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.storage_pool', 'used_iops', 'bigint unsigned DEFAULT NULL COMMENT "IOPS currently in use for this storage pool" '); diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.storage_pool_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.storage_pool_view.sql index e6cc9458208..5d7585baa3b 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.storage_pool_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.storage_pool_view.sql @@ -31,7 +31,9 @@ SELECT `storage_pool`.`created` AS `created`, `storage_pool`.`removed` AS `removed`, `storage_pool`.`capacity_bytes` AS `capacity_bytes`, + `storage_pool`.`used_bytes` AS 
`used_bytes`, `storage_pool`.`capacity_iops` AS `capacity_iops`, + `storage_pool`.`used_iops` AS `used_iops`, `storage_pool`.`scope` AS `scope`, `storage_pool`.`hypervisor` AS `hypervisor`, `storage_pool`.`storage_provider_name` AS `storage_provider_name`, diff --git a/engine/schema/src/test/java/com/cloud/upgrade/dao/Upgrade41700to41710Test.java b/engine/schema/src/test/java/com/cloud/upgrade/dao/Upgrade41700to41710Test.java new file mode 100644 index 00000000000..ad7c0cede25 --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/upgrade/dao/Upgrade41700to41710Test.java @@ -0,0 +1,123 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.upgrade.dao; + +import java.util.Collections; +import java.util.List; + +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDaoImpl; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.storage.Storage; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeDaoImpl; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class Upgrade41700to41710Test { + @Spy + Upgrade41700to41710 upgrade41700to41710; + + @Test + public void testGetStorageDao_FirstInvocationCreatesInstance() { + PrimaryDataStoreDao dao1 = upgrade41700to41710.getStorageDao(); + Assert.assertNotNull(dao1); + Assert.assertTrue(dao1 instanceof PrimaryDataStoreDaoImpl); + } + + @Test + public void testGetStorageDao_SubsequentInvocationReturnsSameInstance() { + PrimaryDataStoreDao dao1 = upgrade41700to41710.getStorageDao(); + PrimaryDataStoreDao dao2 = upgrade41700to41710.getStorageDao(); + Assert.assertSame(dao1, dao2); + } + + @Test + public void testGetVolumeDao_FirstInvocationCreatesInstance() { + VolumeDao dao1 = upgrade41700to41710.getVolumeDao(); + Assert.assertNotNull(dao1); + Assert.assertTrue(dao1 instanceof VolumeDaoImpl); + } + + @Test + public void testGetVolumeDao_SubsequentInvocationReturnsSameInstance() { + VolumeDao dao1 = upgrade41700to41710.getVolumeDao(); + VolumeDao dao2 = upgrade41700to41710.getVolumeDao(); + Assert.assertSame(dao1, dao2); + } + + @Test + public void testUpdateStorPoolStorageType_WithPoolIds() { + PrimaryDataStoreDao storageDao = Mockito.mock(PrimaryDataStoreDao.class); 
+ Mockito.doReturn(storageDao).when(upgrade41700to41710).getStorageDao(); + StoragePoolVO pool = Mockito.mock(StoragePoolVO.class); + SearchBuilder searchBuilder = Mockito.mock(SearchBuilder.class); + Mockito.when(storageDao.createSearchBuilder()).thenReturn(searchBuilder); + Mockito.when(searchBuilder.entity()).thenReturn(pool); + Mockito.when(searchBuilder.create()).thenReturn(Mockito.mock(SearchCriteria.class)); + GenericSearchBuilder gSb = Mockito.mock(GenericSearchBuilder.class); + Mockito.doReturn(gSb).when(storageDao).createSearchBuilder(Mockito.any()); + Mockito.when(gSb.create()).thenReturn(Mockito.mock(SearchCriteria.class)); + Mockito.when(gSb.entity()).thenReturn(pool); + Mockito.when(storageDao.createForUpdate()).thenReturn(pool); + Mockito.doNothing().when(upgrade41700to41710).updateStorageTypeForStorPoolVolumes(Mockito.any()); + + Mockito.when(storageDao.update(Mockito.any(StoragePoolVO.class), Mockito.any())).thenReturn(2); + Mockito.when(storageDao.customSearch(Mockito.any(), Mockito.any())).thenReturn(List.of(1L, 2L)); + upgrade41700to41710.updateStorPoolStorageType(); + Mockito.verify(storageDao, Mockito.times(1)).update(Mockito.any(StoragePoolVO.class), Mockito.any()); + Mockito.verify(upgrade41700to41710, Mockito.times(1)).updateStorageTypeForStorPoolVolumes(Mockito.any()); + } + + @Test + public void testUpdateStorageTypeForStorPoolVolumes_EmptyPoolIds() { + VolumeDao volumeDao = Mockito.mock(VolumeDao.class); + List storagePoolIds = Collections.emptyList(); + upgrade41700to41710.updateStorageTypeForStorPoolVolumes(storagePoolIds); + Mockito.verify(volumeDao, Mockito.never()).update(Mockito.any(VolumeVO.class), Mockito.any()); + } + + @Test + public void testUpdateStorageTypeForStorPoolVolumes_WithPoolIds() { + VolumeDao volumeDao = Mockito.mock(VolumeDao.class); + List storagePoolIds = List.of(1L, 2L, 3L); + VolumeVO volume = Mockito.mock(VolumeVO.class); + SearchBuilder searchBuilder = Mockito.mock(SearchBuilder.class); + SearchCriteria 
searchCriteria = Mockito.mock(SearchCriteria.class); + Mockito.when(volumeDao.createForUpdate()).thenReturn(volume); + Mockito.when(volumeDao.createSearchBuilder()).thenReturn(searchBuilder); + Mockito.when(searchBuilder.entity()).thenReturn(volume); + Mockito.when(searchBuilder.create()).thenReturn(searchCriteria); + Mockito.when(volumeDao.update(Mockito.any(VolumeVO.class), Mockito.any())).thenReturn(3); + Mockito.doReturn(volumeDao).when(upgrade41700to41710).getVolumeDao(); + upgrade41700to41710.updateStorageTypeForStorPoolVolumes(storagePoolIds); + Mockito.verify(volumeDao).createForUpdate(); + Mockito.verify(volume).setPoolType(Storage.StoragePoolType.StorPool); + Mockito.verify(volumeDao).update(Mockito.eq(volume), Mockito.eq(searchCriteria)); + Mockito.verify(searchCriteria).setParameters("poolId", storagePoolIds.toArray()); + } +} diff --git a/packaging/el8/cloud.spec b/packaging/el8/cloud.spec index eb03cfe0df4..e34778820cb 100644 --- a/packaging/el8/cloud.spec +++ b/packaging/el8/cloud.spec @@ -118,6 +118,7 @@ Requires: cryptsetup Requires: rng-tools Requires: (libgcrypt > 1.8.3 or libgcrypt20) Requires: (selinux-tools if qemu-tools) +Requires: sysstat Provides: cloud-agent Group: System Environment/Libraries %description agent diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetStorageStatsCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetStorageStatsCommandWrapper.java index d00f5b540e2..419b5449258 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetStorageStatsCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetStorageStatsCommandWrapper.java @@ -40,7 +40,8 @@ public final class LibvirtGetStorageStatsCommandWrapper extends CommandWrapper commands = new ArrayList<>(); + commands.add(new String[]{ + 
Script.getExecutableAbsolutePath("bash"), + "-c", + String.format( + "%s %s | %s 'NR==2 {print $1}'", + Script.getExecutableAbsolutePath("df"), + pool.getLocalPath(), + Script.getExecutableAbsolutePath("awk") + ) + }); + String result = Script.executePipedCommands(commands, 1000).second(); + if (StringUtils.isBlank(result)) { + return; + } + result = result.trim(); + commands.add(new String[]{ + Script.getExecutableAbsolutePath("bash"), + "-c", + String.format( + "%s -z %s 1 2 | %s 'NR==7 {print $2}'", + Script.getExecutableAbsolutePath("iostat"), + result, + Script.getExecutableAbsolutePath("awk") + ) + }); + result = Script.executePipedCommands(commands, 10000).second(); + logger.trace("Pool used IOPS result: {}", result); + if (StringUtils.isBlank(result)) { + return; + } + try { + double doubleValue = Double.parseDouble(result); + pool.setUsedIops((long) doubleValue); + logger.debug("Updated used IOPS: {} for pool: {}", pool.getUsedIops(), pool.getName()); + } catch (NumberFormatException e) { + logger.warn(String.format("Unable to parse retrieved used IOPS: %s for pool: %s", result, + pool.getName())); + } + } + @Override public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) { logger.info("Trying to fetch storage pool " + uuid + " from libvirt"); @@ -591,6 +639,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { } pool.setCapacity(storage.getInfo().capacity); pool.setUsed(storage.getInfo().allocation); + updateLocalPoolIops(pool); pool.setAvailable(storage.getInfo().available); logger.debug("Successfully refreshed pool " + uuid + diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java index 560020cad38..8e5af7c613d 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java +++ 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java @@ -43,6 +43,8 @@ public class LibvirtStoragePool implements KVMStoragePool { protected String uuid; protected long capacity; protected long used; + protected Long capacityIops; + protected Long usedIops; protected long available; protected String name; protected String localPath; @@ -81,20 +83,38 @@ public class LibvirtStoragePool implements KVMStoragePool { this.used = used; } - public void setAvailable(long available) { - this.available = available; - } - @Override public long getUsed() { return this.used; } + @Override + public Long getCapacityIops() { + return capacityIops; + } + + public void setCapacityIops(Long capacityIops) { + this.capacityIops = capacityIops; + } + + @Override + public Long getUsedIops() { + return usedIops; + } + + public void setUsedIops(Long usedIops) { + this.usedIops = usedIops; + } + @Override public long getAvailable() { return this.available; } + public void setAvailable(long available) { + this.available = available; + } + public StoragePoolType getStoragePoolType() { return this.type; } diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptorTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptorTest.java index c2bbff7efb0..88346abd017 100644 --- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptorTest.java +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptorTest.java @@ -17,18 +17,22 @@ package com.cloud.hypervisor.kvm.storage; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.never; + import java.util.HashMap; import java.util.Map; import java.util.UUID; -import 
com.cloud.utils.exception.CloudRuntimeException; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.libvirt.Connect; import org.libvirt.StoragePool; -import org.libvirt.StoragePoolInfo; +import org.mockito.Mock; import org.mockito.MockedStatic; import org.mockito.Mockito; import org.mockito.MockitoAnnotations; @@ -38,6 +42,9 @@ import org.mockito.junit.MockitoJUnitRunner; import com.cloud.hypervisor.kvm.resource.LibvirtConnection; import com.cloud.hypervisor.kvm.resource.LibvirtStoragePoolDef; import com.cloud.storage.Storage; +import com.cloud.utils.Pair; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.Script; @RunWith(MockitoJUnitRunner.class) public class LibvirtStorageAdaptorTest { @@ -46,6 +53,11 @@ public class LibvirtStorageAdaptorTest { private AutoCloseable closeable; + @Mock + LibvirtStoragePool mockPool; + + MockedStatic