Error Prone integration - Added Error Prone to the build

Pearl Dsilva 2026-01-13 16:06:57 -05:00
parent 6a324da27a
commit 43162bfce8
43 changed files with 160 additions and 137 deletions

View File

@ -249,7 +249,7 @@ public class MockVmMgr implements VmMgr {
public MockVm createVmFromSpec(VirtualMachineTO vmSpec) {
String vmName = vmSpec.getName();
long ramSize = vmSpec.getMinRam();
int utilizationPercent = randSeed.nextInt() % 100;
int utilizationPercent = randSeed.nextInt(100);
MockVm vm = null;
synchronized (this) {
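A minimal standalone sketch of the pattern fixed here (hypothetical class name): nextInt() % 100 can be negative because nextInt() spans the full signed int range, whereas nextInt(100) always yields a uniform value in [0, 100).

import java.util.Random;

public class RandomBoundDemo {
    public static void main(String[] args) {
        Random randSeed = new Random();
        // Buggy: nextInt() may be negative, so the remainder lands anywhere in -99..99.
        int skewed = randSeed.nextInt() % 100;
        // Fixed: always uniform in [0, 100).
        int bounded = randSeed.nextInt(100);
        System.out.println(skewed + " vs " + bounded);
    }
}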

View File

@ -83,7 +83,7 @@ public class UpdateBackupOfferingCmd extends BaseCmd {
public void execute() {
try {
if (StringUtils.isAllEmpty(getName(), getDescription()) && getAllowUserDrivenBackups() == null) {
throw new InvalidParameterValueException(String.format("Can't update Backup Offering [id: %s] because there are no parameters to be updated, at least one of the",
throw new InvalidParameterValueException(String.format("Can't update Backup Offering [id: %s] because there are no parameters to be updated, at least one of the " +
"following should be informed: name, description or allowUserDrivenBackups.", id));
}

View File

@ -106,7 +106,7 @@ public abstract class AbstractConfigItemFacade {
public static AbstractConfigItemFacade getInstance(final Class<? extends NetworkElementCommand> key) {
if (!flyweight.containsKey(key)) {
throw new CloudRuntimeException("Unable to process the configuration for " + key.getClass().getName());
throw new CloudRuntimeException("Unable to process the configuration for " + key.getName());
}
final AbstractConfigItemFacade instance = flyweight.get(key);
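A small sketch of the same mistake in isolation (hypothetical class name): when a variable already holds a Class object, getClass() returns java.lang.Class itself, so getName() reports "java.lang.Class" instead of the intended type.

public class ClassNameDemo {
    public static void main(String[] args) {
        Class<?> key = String.class;
        // Buggy: prints "java.lang.Class" - getClass() of a Class object is Class itself.
        System.out.println(key.getClass().getName());
        // Fixed: prints "java.lang.String".
        System.out.println(key.getName());
    }
}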

View File

@ -514,7 +514,7 @@ public class HAProxyConfigurator implements LoadBalancerConfigurator {
}
dstSubRule.add(sb.toString());
if (stickinessSubRule != null) {
sb.append(" cookie ").append(dest.getDestIp().replace(".", "_")).append('-').append(dest.getDestPort()).toString();
sb.append(" cookie ").append(dest.getDestIp().replace(".", "_")).append('-').append(dest.getDestPort());
dstWithCookieSubRule.add(sb.toString());
}
destsAvailable = true;
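The trailing .toString() above produced a String that was discarded; a short sketch of that pattern with hypothetical values:

public class IgnoredToStringDemo {
    public static void main(String[] args) {
        StringBuilder sb = new StringBuilder("server1:80");
        // Buggy: sb.append(" cookie ").append("10_0_0_1").append('-').append(8080).toString();
        // The String built by toString() above would be thrown away.
        sb.append(" cookie ").append("10_0_0_1").append('-').append(8080);
        // toString() belongs where the String is actually consumed.
        System.out.println(sb.toString());
    }
}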

View File

@ -86,7 +86,7 @@ public abstract class RequestWrapper {
keepCommandClass = commandClass2;
} catch (final ClassCastException e) {
throw new CommandNotSupported("No key found for '" + keepCommandClass.getClass() + "' in the Map!");
throw new CommandNotSupported("No key found for '" + keepCommandClass + "' in the Map!");
} catch (final NullPointerException e) {
// Will now traverse all the resource hierarchy. Returning null
// is not a problem.

View File

@ -52,7 +52,6 @@ public abstract class DirectDownloadCommand extends StorageSubSystemCommand {
final Integer soTimeout, final Integer connectionRequestTimeout, final boolean followRedirects) {
this.url = url;
this.templateId = templateId;
this.destData = destData;
this.destPool = destPool;
this.checksum = checksum;
this.headers = headers;

View File

@ -24,6 +24,7 @@ import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
@ -497,6 +498,11 @@ public abstract class AgentAttache {
*/
protected abstract boolean isClosed();
@Override
public int hashCode() {
return Objects.hash(logger, _id, _uuid, _name, _waitForList, _requests, _currentSequence, _status, _maintenance, _nextSequence, _agentMgr);
}
protected class Alarm extends ManagedContextRunnable {
long _seq;
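The added hashCode() keeps the equals/hashCode contract that Error Prone enforces; a compact standalone sketch (hypothetical class and fields) of the same pairing using Objects.hash:

import java.util.Objects;

public class Endpoint {
    private final String host;
    private final int port;

    public Endpoint(String host, int port) {
        this.host = host;
        this.port = port;
    }

    @Override
    public boolean equals(Object other) {
        if (!(other instanceof Endpoint)) {
            return false;
        }
        Endpoint that = (Endpoint) other;
        return port == that.port && Objects.equals(host, that.host);
    }

    // Required alongside equals(): equal objects must produce equal hash codes.
    @Override
    public int hashCode() {
        return Objects.hash(host, port);
    }
}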

View File

@ -19,6 +19,7 @@ package com.cloud.agent.manager;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
@ -148,6 +149,11 @@ public class DirectAgentAttache extends AgentAttache {
}
}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), _HostPingRetryCount, _HostPingRetryTimer, _resource, _futures, _seq, tasks, _outstandingTaskCount, _outstandingCronTaskCount);
}
protected class PingTask extends ManagedContextRunnable {
@Override
protected synchronized void runInContext() {

View File

@ -1730,7 +1730,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
} catch (final OperationTimedoutException e) {
throw new AgentUnavailableException(String.format("Unable to stop vm [%s] because the operation to stop timed out", vmUuid), e.getAgentId(), e);
} catch (final ConcurrentOperationException e) {
throw new CloudRuntimeException(String.format("Unable to stop vm because of a concurrent operation", vmUuid), e);
throw new CloudRuntimeException(String.format("Unable to stop vm: %s because of a concurrent operation", vmUuid), e);
}
}
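A tiny sketch of the format-string mismatch fixed here (hypothetical value): without a %s placeholder the vmUuid argument is silently dropped from the message.

public class FormatArgDemo {
    public static void main(String[] args) {
        String vmUuid = "1a2b-3c4d";
        // Buggy: no placeholder, so vmUuid never appears in the output.
        System.out.println(String.format("Unable to stop vm because of a concurrent operation", vmUuid));
        // Fixed: the placeholder keeps the VM id in the message.
        System.out.println(String.format("Unable to stop vm: %s because of a concurrent operation", vmUuid));
    }
}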

View File

@ -1737,7 +1737,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
final List<FirewallRuleVO> firewallEgressRulesToApply = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Egress);
final NetworkOfferingVO offering = _networkOfferingDao.findById(network.getNetworkOfferingId());
final DataCenter zone = _dcDao.findById(network.getDataCenterId());
if (_networkModel.areServicesSupportedInNetwork(network.getId(), Service.Firewall) && _networkModel.areServicesSupportedInNetwork(network.getId(), Service.Firewall)
if (_networkModel.areServicesSupportedInNetwork(network.getId(), Service.Firewall)
&& (network.getGuestType() == Network.GuestType.Isolated || network.getGuestType() == Network.GuestType.Shared && zone.getNetworkType() == NetworkType.Advanced)) {
// add default egress rule to accept the traffic
_firewallMgr.applyDefaultEgressFirewallRule(network.getId(), offering.isEgressDefaultPolicy(), true);
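The original condition repeated the same call on both sides of &&; a reduced sketch of that redundancy (hypothetical method and values):

public class IdenticalOperandsDemo {
    // Stand-in for a capability lookup such as areServicesSupportedInNetwork.
    static boolean supportsFirewall(long networkId) {
        return networkId % 2 == 0;
    }

    public static void main(String[] args) {
        long networkId = 42L;
        // Buggy: the second operand repeats the first, so && adds nothing.
        boolean redundant = supportsFirewall(networkId) && supportsFirewall(networkId);
        // Fixed: one call expresses the same condition.
        boolean fixed = supportsFirewall(networkId);
        System.out.println(redundant + " " + fixed);
    }
}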

View File

@ -460,7 +460,7 @@ public class NetworkOfferingVO implements NetworkOffering {
true,
Availability.Optional,
null,
Network.GuestType.Isolated,
guestType,
true,
false,
false,

View File

@ -324,38 +324,33 @@ public class SystemVmTemplateRegistration {
public static final Map<String, MetadataTemplateDetails> NewTemplateMap = new HashMap<>();
public static final Map<Hypervisor.HypervisorType, String> RouterTemplateConfigurationNames = new HashMap<>() {
{
put(Hypervisor.HypervisorType.KVM, "router.template.kvm");
put(Hypervisor.HypervisorType.VMware, "router.template.vmware");
put(Hypervisor.HypervisorType.XenServer, "router.template.xenserver");
put(Hypervisor.HypervisorType.Hyperv, "router.template.hyperv");
put(Hypervisor.HypervisorType.LXC, "router.template.lxc");
put(Hypervisor.HypervisorType.Ovm3, "router.template.ovm3");
}
};
public static final Map<Hypervisor.HypervisorType, String> RouterTemplateConfigurationNames = Map.of(
Hypervisor.HypervisorType.KVM, "router.template.kvm",
Hypervisor.HypervisorType.VMware, "router.template.vmware",
Hypervisor.HypervisorType.XenServer, "router.template.xenserver",
Hypervisor.HypervisorType.Hyperv, "router.template.hyperv",
Hypervisor.HypervisorType.LXC, "router.template.lxc",
Hypervisor.HypervisorType.Ovm3, "router.template.ovm3"
);
public static Map<Hypervisor.HypervisorType, Integer> hypervisorGuestOsMap = new HashMap<>() {
{
put(Hypervisor.HypervisorType.KVM, LINUX_12_ID);
put(Hypervisor.HypervisorType.XenServer, OTHER_LINUX_ID);
put(Hypervisor.HypervisorType.VMware, OTHER_LINUX_ID);
put(Hypervisor.HypervisorType.Hyperv, LINUX_12_ID);
put(Hypervisor.HypervisorType.LXC, LINUX_12_ID);
put(Hypervisor.HypervisorType.Ovm3, LINUX_12_ID);
}
};
public static Map<Hypervisor.HypervisorType, Integer> hypervisorGuestOsMap = new HashMap<>();
static {
hypervisorGuestOsMap.put(Hypervisor.HypervisorType.KVM, LINUX_12_ID);
hypervisorGuestOsMap.put(Hypervisor.HypervisorType.XenServer, OTHER_LINUX_ID);
hypervisorGuestOsMap.put(Hypervisor.HypervisorType.VMware, OTHER_LINUX_ID);
hypervisorGuestOsMap.put(Hypervisor.HypervisorType.Hyperv, LINUX_12_ID);
hypervisorGuestOsMap.put(Hypervisor.HypervisorType.LXC, LINUX_12_ID);
hypervisorGuestOsMap.put(Hypervisor.HypervisorType.Ovm3, LINUX_12_ID);
}
public static final Map<Hypervisor.HypervisorType, ImageFormat> hypervisorImageFormat = new HashMap<Hypervisor.HypervisorType, ImageFormat>() {
{
put(Hypervisor.HypervisorType.KVM, ImageFormat.QCOW2);
put(Hypervisor.HypervisorType.XenServer, ImageFormat.VHD);
put(Hypervisor.HypervisorType.VMware, ImageFormat.OVA);
put(Hypervisor.HypervisorType.Hyperv, ImageFormat.VHD);
put(Hypervisor.HypervisorType.LXC, ImageFormat.QCOW2);
put(Hypervisor.HypervisorType.Ovm3, ImageFormat.RAW);
}
};
public static final Map<Hypervisor.HypervisorType, ImageFormat> hypervisorImageFormat = Map.of(
Hypervisor.HypervisorType.KVM, ImageFormat.QCOW2,
Hypervisor.HypervisorType.XenServer, ImageFormat.VHD,
Hypervisor.HypervisorType.VMware, ImageFormat.OVA,
Hypervisor.HypervisorType.Hyperv, ImageFormat.VHD,
Hypervisor.HypervisorType.LXC, ImageFormat.QCOW2,
Hypervisor.HypervisorType.Ovm3, ImageFormat.RAW
);
public boolean validateIfSeeded(TemplateDataStoreVO templDataStoreVO, String url, String path, String nfsVersion) {
String filePath = null;
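A brief sketch contrasting the two initialization styles involved here (hypothetical keys and values): the double-brace idiom creates an anonymous HashMap subclass per constant, while Map.of builds an immutable map with no extra class.

import java.util.HashMap;
import java.util.Map;

public class MapInitDemo {
    // Discouraged: each double-brace constant is an anonymous HashMap subclass.
    static final Map<String, String> DOUBLE_BRACE = new HashMap<String, String>() {
        {
            put("kvm", "qcow2");
            put("xenserver", "vhd");
        }
    };

    // Preferred: immutable, no extra class (Map.of supports up to ten pairs).
    static final Map<String, String> LITERAL = Map.of(
            "kvm", "qcow2",
            "xenserver", "vhd");

    public static void main(String[] args) {
        System.out.println(DOUBLE_BRACE.equals(LITERAL)); // true: same entries
    }
}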

View File

@ -98,7 +98,7 @@ public class DatabaseAccessObject {
return true;
}
} catch (SQLException e) {
logger.debug(String.format("Index %s doesn't exist, ignoring exception:", indexName, e.getMessage()));
logger.debug(String.format("Index %s doesn't exist, ignoring exception:", indexName), e.getMessage());
}
return false;
}

View File

@ -22,7 +22,6 @@ import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
@ -98,49 +97,41 @@ public class Upgrade41500to41510 extends DbUpgradeAbstractImpl implements DbUpgr
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while getting hypervisor types from clusters", e);
}
final Map<Hypervisor.HypervisorType, String> NewTemplateNameList = new HashMap<Hypervisor.HypervisorType, String>() {
{
put(KVM, "systemvm-kvm-4.15.1");
put(VMware, "systemvm-vmware-4.15.1");
put(XenServer, "systemvm-xenserver-4.15.1");
put(Hyperv, "systemvm-hyperv-4.15.1");
put(LXC, "systemvm-lxc-4.15.1");
put(Ovm3, "systemvm-ovm3-4.15.1");
}
};
final Map<Hypervisor.HypervisorType, String> NewTemplateNameList = Map.of(
KVM, "systemvm-kvm-4.15.1",
VMware, "systemvm-vmware-4.15.1",
XenServer, "systemvm-xenserver-4.15.1",
Hyperv, "systemvm-hyperv-4.15.1",
LXC, "systemvm-lxc-4.15.1",
Ovm3, "systemvm-ovm3-4.15.1"
);
final Map<Hypervisor.HypervisorType, String> routerTemplateConfigurationNames = new HashMap<Hypervisor.HypervisorType, String>() {
{
put(KVM, "router.template.kvm");
put(VMware, "router.template.vmware");
put(XenServer, "router.template.xenserver");
put(Hyperv, "router.template.hyperv");
put(LXC, "router.template.lxc");
put(Ovm3, "router.template.ovm3");
}
};
final Map<Hypervisor.HypervisorType, String> routerTemplateConfigurationNames = Map.of(
KVM, "router.template.kvm",
VMware, "router.template.vmware",
XenServer, "router.template.xenserver",
Hyperv, "router.template.hyperv",
LXC, "router.template.lxc",
Ovm3, "router.template.ovm3"
);
final Map<Hypervisor.HypervisorType, String> newTemplateUrl = new HashMap<Hypervisor.HypervisorType, String>() {
{
put(KVM, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-kvm.qcow2.bz2");
put(VMware, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-vmware.ova");
put(XenServer, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-xen.vhd.bz2");
put(Hyperv, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-hyperv.vhd.zip");
put(LXC, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-kvm.qcow2.bz2");
put(Ovm3, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-ovm.raw.bz2");
}
};
final Map<Hypervisor.HypervisorType, String> newTemplateUrl = Map.of(
KVM, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-kvm.qcow2.bz2",
VMware, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-vmware.ova",
XenServer, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-xen.vhd.bz2",
Hyperv, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-hyperv.vhd.zip",
LXC, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-kvm.qcow2.bz2",
Ovm3, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-ovm.raw.bz2"
);
final Map<Hypervisor.HypervisorType, String> newTemplateChecksum = new HashMap<Hypervisor.HypervisorType, String>() {
{
put(KVM, "0e9f9a7d0957c3e0a2088e41b2da2cec");
put(XenServer, "86373992740b1eca8aff8b08ebf3aea5");
put(VMware, "4006982765846d373eb3719b2fe4d720");
put(Hyperv, "0b9514e4b6cba1f636fea2125f0f7a5f");
put(LXC, "0e9f9a7d0957c3e0a2088e41b2da2cec");
put(Ovm3, "ae3977e696b3e6c81bdcbb792d514d29");
}
};
final Map<Hypervisor.HypervisorType, String> newTemplateChecksum = Map.of(
KVM, "0e9f9a7d0957c3e0a2088e41b2da2cec",
XenServer, "86373992740b1eca8aff8b08ebf3aea5",
VMware, "4006982765846d373eb3719b2fe4d720",
Hyperv, "0b9514e4b6cba1f636fea2125f0f7a5f",
LXC, "0e9f9a7d0957c3e0a2088e41b2da2cec",
Ovm3, "ae3977e696b3e6c81bdcbb792d514d29"
);
for (final Map.Entry<Hypervisor.HypervisorType, String> hypervisorAndTemplateName : NewTemplateNameList.entrySet()) {
logger.debug("Updating " + hypervisorAndTemplateName.getKey() + " System Vms");

View File

@ -139,7 +139,7 @@ public class ScaleIOVMSnapshotStrategy extends ManagerBase implements VMSnapshot
for (VolumeObjectTO volumeTO : volumeTOs) {
Long poolId = volumeTO.getPoolId();
Storage.StoragePoolType poolType = vmSnapshotHelper.getStoragePoolType(poolId);
if (poolType != Storage.StoragePoolType.PowerFlex || volumeTO.getFormat() != ImageFormat.RAW || poolId != rootPoolId) {
if (poolType != Storage.StoragePoolType.PowerFlex || volumeTO.getFormat() != ImageFormat.RAW || !poolId.equals(rootPoolId)) {
return StrategyPriority.CANT_HANDLE;
}
}
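A standalone sketch of the boxed-comparison pitfall fixed here (hypothetical values): == and != on Long compare object identity, so numerically equal values outside the small autobox cache compare unequal.

import java.util.Objects;

public class BoxedCompareDemo {
    public static void main(String[] args) {
        Long poolId = 1000L;
        Long rootPoolId = 1000L;
        // Buggy: identity comparison; true here even though the values match.
        System.out.println(poolId != rootPoolId);
        // Fixed: value comparison (Objects.equals also tolerates nulls).
        System.out.println(!poolId.equals(rootPoolId));
        System.out.println(Objects.equals(poolId, rootPoolId));
    }
}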

View File

@ -25,7 +25,6 @@ import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import javax.inject.Inject;
@ -198,12 +197,7 @@ public class DefaultEndPointSelector implements EndPointSelector {
Pair<List<DedicatedResourceVO>, Integer> hostIds = dedicatedResourceDao.searchDedicatedHosts(null, null, account.getId(), null, null);
List<DedicatedResourceVO> accountDedicatedHosts = hostIds.first();
for (DedicatedResourceVO accountDedicatedResource: accountDedicatedHosts){
Iterator<Long> dedicatedHostsIterator = dedicatedHosts.iterator();
while (dedicatedHostsIterator.hasNext()) {
if (dedicatedHostsIterator.next() == accountDedicatedResource.getHostId()) {
dedicatedHostsIterator.remove();
}
}
dedicatedHosts.removeIf(hostId -> hostId.equals(accountDedicatedResource.getHostId()));
}
}
}

View File

@ -55,7 +55,7 @@ public class OnwireClassRegistry {
}
public OnwireClassRegistry(List<String> packages) {
packages.addAll(packages);
this.packages.addAll(packages);
}
public List<String> getPackages() {

View File

@ -440,7 +440,7 @@ public final class RootCAProvider extends AdapterBase implements CAProvider, Con
@Override
public boolean start() {
managementCertificateCustomSAN = CAManager.CertManagementCustomSubjectAlternativeName.value();
return loadRootCAKeyPair() && loadRootCAKeyPair() && loadManagementKeyStore();
return loadRootCAKeyPair() && loadManagementKeyStore();
}
@Override

View File

@ -45,7 +45,7 @@ public class HypervInvestigator extends AdapterBase implements Investigator {
if (status == null) {
throw new UnknownVM();
}
return status == Status.Up ? true : null;
return status == Status.Up;
}
@Override
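A reduced sketch of the Boolean-ternary hazard addressed above (hypothetical enum and method names): a conditional whose branches are true and null yields a Boolean that can be null, which turns into a NullPointerException wherever a caller unboxes it.

public class NullableBooleanDemo {
    enum Status { Up, Down }

    // Risky: returns null when status is not Up, so unboxing at a call site can NPE.
    static Boolean isUpOrNull(Status status) {
        return status == Status.Up ? true : null;
    }

    // Safer: always returns a real boolean.
    static boolean isUp(Status status) {
        return status == Status.Up;
    }

    public static void main(String[] args) {
        System.out.println(isUp(Status.Down));
        System.out.println(isUpOrNull(Status.Down)); // prints null
    }
}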

View File

@ -3882,12 +3882,12 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
LOGGER.info(String.format("Host uses control group [%s].", output));
if (!CGROUP_V2.equals(output)) {
LOGGER.info(String.format("Setting host CPU max capacity to 0, as it uses cgroup v1.", getHostCpuMaxCapacity()));
LOGGER.info("Setting host CPU max capacity: {} to 0, as it uses cgroup v1.", getHostCpuMaxCapacity());
setHostCpuMaxCapacity(0);
return;
}
LOGGER.info(String.format("Calculating the max shares of the host."));
LOGGER.info("Calculating the max shares of the host.");
setHostCpuMaxCapacity(cpuCores * cpuSpeed.intValue());
LOGGER.info(String.format("The max shares of the host is [%d].", getHostCpuMaxCapacity()));
}
@ -5302,7 +5302,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
for (String snapshotName: snapshotNames) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(String.format("Cleaning snapshot [%s] of VM [%s] metadata.", snapshotNames, dm.getName()));
LOGGER.debug("Cleaning snapshot {} of VM {} metadata.", Arrays.toString(snapshotNames), dm.getName());
}
DomainSnapshot snapshot = dm.snapshotLookupByName(snapshotName);
snapshot.delete(flags); // clean metadata of vm snapshot
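A short sketch of the logging style applied in this file (hypothetical logger and value): wrapping String.format around the message formats eagerly and here had no placeholder at all, while Log4j2's {} form defers formatting and keeps the argument in the output.

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class ParameterizedLoggingDemo {
    private static final Logger LOGGER = LogManager.getLogger(ParameterizedLoggingDemo.class);

    public static void main(String[] args) {
        int maxCapacity = 0;
        // Discouraged: no placeholder, so maxCapacity is dropped, and String.format
        // runs even when the INFO level is disabled.
        LOGGER.info(String.format("Setting host CPU max capacity to 0, as it uses cgroup v1.", maxCapacity));
        // Preferred: lazy formatting, and the argument actually appears in the message.
        LOGGER.info("Setting host CPU max capacity: {} to 0, as it uses cgroup v1.", maxCapacity);
    }
}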

View File

@ -794,7 +794,7 @@ public class KVMStorageProcessor implements StorageProcessor {
if (path == null) {
path = srcData.getPath();
if (path == null) {
new CloudRuntimeException("The 'path' or 'iqn' field must be specified.");
throw new CloudRuntimeException("The 'path' or 'iqn' field must be specified.");
}
}
}
@ -2261,7 +2261,7 @@ public class KVMStorageProcessor implements StorageProcessor {
if (path == null) {
path = details != null ? details.get(DiskTO.IQN) : null;
if (path == null) {
new CloudRuntimeException("The 'path' or 'iqn' field must be specified.");
throw new CloudRuntimeException("The 'path' or 'iqn' field must be specified.");
}
}
}
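Both hunks in this file fix the same dead-exception pattern; a minimal sketch of it (hypothetical method and message): constructing an exception without throw creates the object and discards it, so the error path silently continues.

public class DeadExceptionDemo {
    static void requirePath(String path) {
        if (path == null) {
            // Buggy (no-op): new IllegalStateException("The 'path' or 'iqn' field must be specified.");
            // Fixed: actually throw it so callers see the failure.
            throw new IllegalStateException("The 'path' or 'iqn' field must be specified.");
        }
    }

    public static void main(String[] args) {
        requirePath("/dev/sda");
    }
}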

View File

@ -228,21 +228,21 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
@Override
public boolean disconnectPhysicalDisk(String volumePath, KVMStoragePool pool) {
if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("disconnectPhysicalDisk(volumePath,pool) called with args (%s, %s) START", volumePath, pool.getUuid()));
if (LOGGER.isDebugEnabled()) LOGGER.debug("disconnectPhysicalDisk(volumePath,pool) called with args ({}, {}) START", volumePath, pool.getUuid());
AddressInfo address = this.parseAndValidatePath(volumePath);
if (address.getAddress() == null) {
if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("disconnectPhysicalDisk(volumePath,pool) returning FALSE, volume path has no address field", volumePath, pool.getUuid()));
if (LOGGER.isDebugEnabled()) LOGGER.debug("disconnectPhysicalDisk(volumePath,pool) called with args ({}, {}) returning FALSE, volume path has no address field", volumePath, pool.getUuid());
return false;
}
ScriptResult result = runScript(disconnectScript, 60000L, address.getAddress().toLowerCase());
if (result.getExitCode() != 0) {
LOGGER.warn(String.format("Disconnect failed for path [%s] with return code [%s]", address.getAddress().toLowerCase(), result.getExitCode()));
LOGGER.warn("Disconnect failed for path {} with return code {}", address.getAddress().toLowerCase(), result.getExitCode());
}
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("multipath flush output: " + result.getResult());
LOGGER.debug(String.format("disconnectPhysicalDisk(volumePath,pool) called with args (%s, %s) COMPLETE [rc=%s]", volumePath, pool.getUuid(), result.getResult()));
LOGGER.debug("multipath flush output: {}", result.getResult());
LOGGER.debug("disconnectPhysicalDisk(volumePath,pool) called with args ({}, {}) COMPLETE [rc={}]", volumePath, pool.getUuid(), result.getResult());
}
return (result.getExitCode() == 0);
@ -250,7 +250,7 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
@Override
public boolean disconnectPhysicalDisk(Map<String, String> volumeToDisconnect) {
LOGGER.debug(String.format("disconnectPhysicalDisk(volumeToDisconnect) called with arg bag [not implemented]:") + " " + volumeToDisconnect);
LOGGER.debug("disconnectPhysicalDisk(volumeToDisconnect) called with arg bag [not implemented]: {}", volumeToDisconnect);
return false;
}

View File

@ -181,10 +181,9 @@ public class XenServerGuru extends HypervisorGuruBase implements HypervisorGuru,
logger.debug("We are returning the default host to execute commands because the target hypervisor of the source data is not XenServer.");
return defaultHostToExecuteCommands;
}
// only now can we decide, now we now we're only deciding for ourselves
if (cmd instanceof StorageSubSystemCommand) {
if (logger.isTraceEnabled()) {
logger.trace(String.format("XenServer StrorageSubSystemCommand re always executed in sequence (command of type %s to host %l).", cmd.getClass(), hostId));
logger.trace(String.format("XenServer StrorageSubSystemCommand is always executed in sequence (command of type %s to host %s).", cmd.getClass(), hostId));
}
StorageSubSystemCommand c = (StorageSubSystemCommand)cmd;
c.setExecuteInSequence(true);
@ -223,7 +222,7 @@ public class XenServerGuru extends HypervisorGuruBase implements HypervisorGuru,
return defaultHostToExecuteCommands;
}
logger.debug(String.format("We are changing the hostId to executed command from %d to %d.", hostId, hostCandidateToExecutedCommand.getId()));
return new Pair<Boolean, Long>(Boolean.TRUE, new Long(hostCandidateToExecutedCommand.getId()));
return new Pair<>(Boolean.TRUE, hostCandidateToExecutedCommand.getId());
}
@Override

View File

@ -39,7 +39,7 @@ public class RedfishWrapper {
case SOFT:
return RedfishClient.RedfishResetCmd.GracefulShutdown;
case STATUS:
throw new IllegalStateException(String.format("%s is not a valid Redfish Reset command [%s]", operation));
throw new IllegalStateException(String.format("%s is not a valid Redfish Reset command", operation));
default:
throw new IllegalStateException(String.format("Redfish does not support operation [%s]", operation));
}

View File

@ -20,6 +20,7 @@ package org.apache.cloudstack.storage.datastore.util;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Objects;
import org.apache.cloudstack.storage.datastore.util.NexentaNmsClient.NmsResponse;
import org.apache.logging.log4j.Logger;
@ -132,6 +133,11 @@ public class NexentaStorAppliance {
public boolean equals(Object other) {
return other instanceof CreateIscsiTargetRequestParams && targetName.equals(((CreateIscsiTargetRequestParams) other).targetName);
}
@Override
public int hashCode() {
return Objects.hashCode(targetName);
}
}
/**
@ -248,6 +254,11 @@ public class NexentaStorAppliance {
public boolean equals(Object other) {
return other instanceof LuParams;
}
@Override
public int hashCode() {
return 1;
}
}
/**
@ -309,6 +320,11 @@ public class NexentaStorAppliance {
}
return false;
}
@Override
public int hashCode() {
return Objects.hash(targetGroup, lun, zvol, hostGroup, entryNumber);
}
}
@SuppressWarnings("unused")

View File

@ -399,7 +399,7 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
private String resizeVolume(DataObject data, String path, VolumeObject vol) {
String err = null;
ResizeVolumePayload payload = (ResizeVolumePayload)vol.getpayload();
boolean needResize = vol.getSize() != payload.newSize;
boolean needResize = !vol.getSize().equals(payload.newSize);
final String name = StorPoolStorageAdaptor.getVolumeNameFromPath(path, true);
final long oldSize = vol.getSize();

View File

@ -221,7 +221,7 @@ public class StorPoolDataMotionStrategy implements DataMotionStrategy {
if (answer != null && answer.getResult()) {
SpApiResponse resSnapshot = StorPoolUtil.volumeSnapshot(volumeName, template.getUuid(), null, "template", null, conn);
if (resSnapshot.getError() != null) {
logger.debug(String.format("Could not snapshot volume with ID={}", snapshot.getId()));
logger.debug("Could not snapshot volume with ID={}", snapshot.getId());
StorPoolUtil.spLog("VolumeSnapshot failed with error=%s", resSnapshot.getError().getDescr());
err = resSnapshot.getError().getDescr();
} else {

View File

@ -34,6 +34,7 @@ public class ADLdapUserManagerImpl extends OpenLdapUserManagerImpl implements Ld
private static final String MICROSOFT_AD_MEMBERS_FILTER = "memberOf";
@Override
@SuppressWarnings("BanJNDI")
public List<LdapUser> getUsersInGroup(String groupName, LdapContext context, Long domainId) throws NamingException {
if (StringUtils.isBlank(groupName)) {
throw new IllegalArgumentException("ldap group name cannot be blank");

View File

@ -135,8 +135,7 @@ public class OpenLdapUserManagerImpl implements LdapUserManager {
final StringBuilder memberOfFilter = new StringBuilder();
if (null != group) {
if(logger.isDebugEnabled()) {
logger.debug("adding search filter for '" + group +
"', using '" + memberOfAttribute + "'");
logger.debug("adding search filter for '{}', using '{}'", group, memberOfAttribute);
}
memberOfFilter.append("(" + memberOfAttribute + "=");
memberOfFilter.append(group);
@ -235,6 +234,7 @@ public class OpenLdapUserManagerImpl implements LdapUserManager {
}
@Override
@SuppressWarnings("BanJNDI")
public List<LdapUser> getUsersInGroup(String groupName, LdapContext context, Long domainId) throws NamingException {
String attributeName = _ldapConfiguration.getGroupUniqueMemberAttribute(domainId);
final SearchControls controls = new SearchControls();
@ -264,6 +264,7 @@ public class OpenLdapUserManagerImpl implements LdapUserManager {
return users;
}
@SuppressWarnings("BanJNDI")
private LdapUser getUserForDn(String userdn, LdapContext context, Long domainId) throws NamingException {
final SearchControls controls = new SearchControls();
controls.setSearchScope(_ldapConfiguration.getScope());
@ -286,6 +287,7 @@ public class OpenLdapUserManagerImpl implements LdapUserManager {
return false;
}
@SuppressWarnings("BanJNDI")
public LdapUser searchUser(final String basedn, final String searchString, final LdapContext context, Long domainId) throws NamingException, IOException {
final SearchControls searchControls = new SearchControls();
@ -310,6 +312,7 @@ public class OpenLdapUserManagerImpl implements LdapUserManager {
}
@Override
@SuppressWarnings("BanJNDI")
public List<LdapUser> searchUsers(final String username, final LdapContext context, Long domainId) throws NamingException, IOException {
final SearchControls searchControls = new SearchControls();

pom.xml
View File

@ -80,6 +80,7 @@
<cs.surefire-plugin.version>2.22.2</cs.surefire-plugin.version>
<cs.clover-maven-plugin.version>4.4.1</cs.clover-maven-plugin.version>
<cs.exec-maven-plugin.version>3.2.0</cs.exec-maven-plugin.version>
<cs.errorprone.version>2.24.1</cs.errorprone.version>
<!-- Logging versions -->
<cs.log4j.version>2.19.0</cs.log4j.version>
@ -1094,15 +1095,25 @@
<configuration>
<source>${cs.jdk.version}</source>
<target>${cs.jdk.version}</target>
<fork>true</fork>
<meminitial>128m</meminitial>
<maxmem>512m</maxmem>
<encoding>UTF-8</encoding>
<compilerArgs>
<arg>-XDignore.symbol.file=true</arg>
<arg>--add-opens=java.base/java.lang=ALL-UNNAMED</arg>
<arg>--add-exports=java.base/sun.security.x509=ALL-UNNAMED</arg>
<arg>--add-exports=java.base/sun.security.provider=ALL-UNNAMED</arg>
<arg>-XDcompilePolicy=simple</arg>
<arg>-Xplugin:ErrorProne</arg>
</compilerArgs>
<annotationProcessorPaths>
<path>
<groupId>com.google.errorprone</groupId>
<artifactId>error_prone_core</artifactId>
<version>${cs.errorprone.version}</version>
</path>
</annotationProcessorPaths>
<fork>true</fork>
<meminitial>128m</meminitial>
<maxmem>512m</maxmem>
</configuration>
</plugin>
<plugin>
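With the plugin active, Error Prone findings can be acknowledged locally where a flagged construct is intentional, as this commit does for BanJNDI in the LDAP managers; a hedged sketch of that suppression mechanism (the lookup method and its use are hypothetical):

import javax.naming.NamingException;
import javax.naming.directory.InitialDirContext;

public class SuppressionDemo {
    // Error Prone honors @SuppressWarnings keyed by the bug-pattern name, so a
    // deliberate use of a flagged construct can be scoped to a single method.
    @SuppressWarnings("BanJNDI")
    static Object lookup(String name) throws NamingException {
        return new InitialDirContext().lookup(name);
    }

    public static void main(String[] args) {
        System.out.println("Compiles cleanly with Error Prone active; running lookup() needs a JNDI provider.");
    }
}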

View File

@ -411,7 +411,7 @@ public class ApiXmlDocWriter {
xs.alias("alert", Alert.class);
try(ObjectOutputStream out = xs.createObjectOutputStream(new FileWriter(dirName + "/alert_types.xml"), "alerts");) {
for (Field f : AlertManager.class.getFields()) {
if (f.getClass().isAssignableFrom(Number.class)) {
if (Number.class.isAssignableFrom(f.getType())) {
String name = f.getName().substring(11);
Alert alert = new Alert(name, f.getInt(null));
out.writeObject(alert);
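A standalone sketch of the reflection fix above (hypothetical field): f.getClass() is java.lang.reflect.Field rather than the field's declared type, and X.isAssignableFrom(Y) asks whether a Y can be assigned to an X, so the corrected form asks whether the field's type is a Number.

import java.lang.reflect.Field;

public class AssignableDemo {
    public static final Integer SAMPLE_CONSTANT = 42; // hypothetical field to inspect

    public static void main(String[] args) throws NoSuchFieldException {
        Field f = AssignableDemo.class.getField("SAMPLE_CONSTANT");
        // Buggy: f.getClass() is java.lang.reflect.Field, and the direction is inverted.
        System.out.println(f.getClass().isAssignableFrom(Number.class)); // false
        // Fixed: is the field's declared type a subtype of Number?
        System.out.println(Number.class.isAssignableFrom(f.getType())); // true
    }
}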

View File

@ -19,7 +19,7 @@ package com.cloud.api.doc;
import java.io.Serializable;
import java.util.List;
public class Argument implements Comparable<Object>, Serializable {
public class Argument implements Comparable<Argument>, Serializable {
private static final long serialVersionUID = 2L;
private String name;
private String description;
@ -101,11 +101,11 @@ public class Argument implements Comparable<Object>, Serializable {
}
@Override
public int compareTo(Object anotherAgrument) throws ClassCastException {
if (!(anotherAgrument instanceof Argument))
throw new ClassCastException("An Argument object expected.");
Argument argument = (Argument)anotherAgrument;
return this.getName().compareToIgnoreCase(argument.getName());
public int compareTo(Argument anotherArgument) {
if (anotherArgument == null) {
throw new NullPointerException("Cannot compare to null Argument");
}
return this.getName().compareToIgnoreCase(anotherArgument.getName());
}
public boolean hasArguments() {

View File

@ -39,6 +39,7 @@ import com.cloud.storage.VnfTemplateNicVO;
import com.cloud.storage.dao.VnfTemplateDetailsDao;
import com.cloud.storage.dao.VnfTemplateNicDao;
import com.cloud.user.dao.UserDataDao;
import com.google.common.primitives.Ints;
import org.apache.cloudstack.annotation.AnnotationService;
import org.apache.cloudstack.annotation.dao.AnnotationDao;
import org.apache.cloudstack.api.ApiConstants;
@ -340,7 +341,7 @@ public class TemplateJoinDaoImpl extends GenericDaoBaseWithTagInformation<Templa
ChildTemplateResponse childTempl = new ChildTemplateResponse();
childTempl.setId(tmpl.getUuid());
childTempl.setName(tmpl.getName());
childTempl.setSize(Math.round(tmpl.getSize() / (1024 * 1024 * 1024)));
childTempl.setSize(Ints.saturatedCast(tmpl.getSize() / (1024 * 1024 * 1024)));
childTemplatesSet.add(childTempl);
}
}

View File

@ -5391,7 +5391,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
}
final Boolean isRangeForSystemVM = checkIfVlanRangeIsForSystemVM(id);
if (forSystemVms != null && isRangeForSystemVM != forSystemVms) {
if (forSystemVms != null && !isRangeForSystemVM.equals(forSystemVms)) {
if (VlanType.DirectAttached.equals(vlanRange.getVlanType())) {
throw new InvalidParameterValueException("forSystemVms is not available for this IP range with vlan type: " + VlanType.DirectAttached);
}

View File

@ -1041,7 +1041,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur
try {
if (vm != null && !VmHaEnabled.valueIn(vm.getDataCenterId())) {
if (logger.isDebugEnabled()) {
logger.debug(String.format("VM high availability manager is disabled, rescheduling the HA work %s, for the VM %s (id) to retry later in case VM high availability manager is enabled on retry attempt", work, vm.getName(), vm.getId()));
logger.debug("VM high availability manager is disabled, rescheduling the HA work {}, for the VM {} (id: {}) to retry later in case VM high availability manager is enabled on retry attempt", work, vm.getName(), vm.getId());
}
long nextTime = getRescheduleTime(wt);
rescheduleWork(work, nextTime);

View File

@ -1311,7 +1311,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage
try {
_privateIPAddressDao.releasePodIpAddress(id);
} catch (Exception e) {
new CloudRuntimeException(e.getMessage());
throw new CloudRuntimeException(e.getMessage());
}
}

View File

@ -1443,7 +1443,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
protected boolean zoneWideVolumeRequiresStorageMotion(PrimaryDataStore volumeDataStore,
final Host sourceHost, final Host destinationHost) {
if (volumeDataStore.isManaged() && sourceHost.getClusterId() != destinationHost.getClusterId()) {
if (volumeDataStore.isManaged() && !sourceHost.getClusterId().equals(destinationHost.getClusterId())) {
PrimaryDataStoreDriver driver = (PrimaryDataStoreDriver)volumeDataStore.getDriver();
// Depends on the storage driver. For some storages simply
// changing volume access to host should work: grant access on destination

View File

@ -3689,7 +3689,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
} else {
result = CollectionUtils.isSubCollection(Arrays.asList(newDiskOfferingTagsAsStringArray), storageTagsList);
}
logger.debug(String.format("Destination storage pool [{}] accepts tags [{}]? {}", destPool.getUuid(), diskOfferingTags, result));
logger.debug("Destination storage pool [{}] accepts tags [{}]? {}", destPool.getUuid(), diskOfferingTags, result);
return result;
}

View File

@ -2140,7 +2140,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
}
protected void validateDiskOfferingChecks(ServiceOfferingVO currentServiceOffering, ServiceOfferingVO newServiceOffering) {
if (currentServiceOffering.getDiskOfferingStrictness() != newServiceOffering.getDiskOfferingStrictness()) {
if (!currentServiceOffering.getDiskOfferingStrictness().equals(newServiceOffering.getDiskOfferingStrictness())) {
throw new InvalidParameterValueException("Unable to Scale VM, since disk offering strictness flag is not same for new service offering and old service offering");
}
@ -2391,7 +2391,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
_executor = Executors.newScheduledThreadPool(wrks, new NamedThreadFactory("UserVm-Scavenger"));
String vmIpWorkers = configs.get(VmIpFetchTaskWorkers.value());
String vmIpWorkers = configs.get(VmIpFetchTaskWorkers.key());
int vmipwrks = NumbersUtil.parseInt(vmIpWorkers, 10);
_vmIpFetchExecutor = Executors.newScheduledThreadPool(vmipwrks, new NamedThreadFactory("UserVm-ipfetch"));

View File

@ -390,7 +390,7 @@ public class RoutedIpv4ManagerImpl extends ComponentLifecycleBase implements Rou
DataCenterIpv4GuestSubnetVO subnetVO = dataCenterIpv4GuestSubnetDao.findById(id);
if (subnetVO == null) {
throw new InvalidParameterValueException(String.format("Cannot find subnet with id: ", id));
throw new InvalidParameterValueException(String.format("Cannot find subnet with id: %s", id));
}
Long accountId = null;
if (accountName != null || (projectId != null && projectId != -1L)) {
@ -430,7 +430,7 @@ public class RoutedIpv4ManagerImpl extends ComponentLifecycleBase implements Rou
final Long id = cmd.getId();
DataCenterIpv4GuestSubnetVO subnetVO = dataCenterIpv4GuestSubnetDao.findById(id);
if (subnetVO == null) {
throw new InvalidParameterValueException(String.format("Cannot find subnet with id: ", id));
throw new InvalidParameterValueException(String.format("Cannot find subnet with id: %s", id));
}
// update domain_id and account_id to null via dataCenterIpv4GuestSubnetDao, to release the dedication
@ -1278,7 +1278,7 @@ public class RoutedIpv4ManagerImpl extends ComponentLifecycleBase implements Rou
BgpPeerVO bgpPeerVO = bgpPeerDao.findById(id);
if (bgpPeerVO == null) {
throw new InvalidParameterValueException(String.format("Cannot find BGP peer with id: ", id));
throw new InvalidParameterValueException(String.format("Cannot find BGP peer with id: %s", id));
}
Long accountId = null;
if (accountName != null || (projectId != null && projectId != -1L)) {
@ -1326,7 +1326,7 @@ public class RoutedIpv4ManagerImpl extends ComponentLifecycleBase implements Rou
final Long id = releaseDedicatedBgpPeerCmd.getId();
BgpPeerVO bgpPeerVO = bgpPeerDao.findById(id);
if (bgpPeerVO == null) {
throw new InvalidParameterValueException(String.format("Cannot find BGP peer with id: ", id));
throw new InvalidParameterValueException(String.format("Cannot find BGP peer with id: %s", id));
}
// update domain_id and account_id to null via bgpPeerDao, to release the dedication

View File

@ -673,7 +673,7 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR
// set site as 'local' for the site in that zone
for (Pair<Long, Long> innerLoopZoneId : gslbSiteIds) {
SiteLoadBalancerConfig siteLb = zoneSiteLoadbalancerMap.get(innerLoopZoneId.first());
siteLb.setLocal(zoneId.first() == innerLoopZoneId.first());
siteLb.setLocal(zoneId.first().equals(innerLoopZoneId.first()));
slbs.add(siteLb);
}

View File

@ -287,8 +287,9 @@ public class ServerNtlmsspChallenge extends OneTimeSwitch implements NtlmConstan
if (state.serverChallenge == null)
throw new RuntimeException("Challenge was not extracted from server NTLMSSP Challenge packet.");
if (!Arrays.equals(challenge, state.serverChallenge))
throw new RuntimeException("Challenge was extracted from server NTLMSSP Challenge packet is not equal to expected. Actual value: "
+ state.serverChallenge + ", expected value: " + challenge + ".");
throw new RuntimeException(String.format("Challenge was extracted from server NTLMSSP Challenge packet is not" +
" equal to expected. Actual value: %s, expected value: %s.",
Arrays.toString(state.serverChallenge), Arrays.toString(challenge)));
}

View File

@ -215,7 +215,7 @@ public class ByteBuffer {
public void extend(int newLength) {
if (data.length < newLength)
Arrays.copyOf(data, newLength);
data = Arrays.copyOf(data, newLength);
}
public void ref() {
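A compact sketch of the fix above (hypothetical buffer class): Arrays.copyOf returns a new array and leaves the original untouched, so discarding the result made extend() a no-op; assigning it back actually grows the buffer.

import java.util.Arrays;

public class GrowableBuffer {
    private byte[] data = new byte[4];

    public void extend(int newLength) {
        if (data.length < newLength) {
            // Buggy (no-op): Arrays.copyOf(data, newLength);
            // Fixed: keep the enlarged copy.
            data = Arrays.copyOf(data, newLength);
        }
    }

    public static void main(String[] args) {
        GrowableBuffer buf = new GrowableBuffer();
        buf.extend(16);
        System.out.println(buf.data.length); // 16
    }
}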