diff --git a/agent/src/main/java/com/cloud/agent/Agent.java b/agent/src/main/java/com/cloud/agent/Agent.java index 15f010808ac..c84179d6660 100644 --- a/agent/src/main/java/com/cloud/agent/Agent.java +++ b/agent/src/main/java/com/cloud/agent/Agent.java @@ -132,6 +132,8 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater ServerResource _resource; Link _link; Long _id; + String _uuid; + String _name; Timer _timer = new Timer("Agent Timer"); Timer certTimer; @@ -182,8 +184,10 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater resource.setAgentControl(this); final String value = _shell.getPersistentProperty(getResourceName(), "id"); + _uuid = _shell.getPersistentProperty(getResourceName(), "uuid"); + _name = _shell.getPersistentProperty(getResourceName(), "name"); _id = value != null ? Long.parseLong(value) : null; - logger.info("id is {}", ObjectUtils.defaultIfNull(_id, "")); + logger.info("Initialising agent [id: {}, uuid: {}, name: {}]", ObjectUtils.defaultIfNull(_id, ""), _uuid, _name); final Map params = new HashMap<>(); @@ -212,8 +216,9 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater new ThreadPoolExecutor(_shell.getWorkers(), 5 * _shell.getWorkers(), 1, TimeUnit.DAYS, new LinkedBlockingQueue(), new NamedThreadFactory( "agentRequest-Handler")); - logger.info("Agent [id = {} : type = {} : zone = {} : pod = {} : workers = {} : host = {} : port = {}", ObjectUtils.defaultIfNull(_id, "new"), getResourceName(), - _shell.getZone(), _shell.getPod(), _shell.getWorkers(), host, _shell.getPort()); + logger.info("Agent [id = {}, uuid: {}, name: {}] : type = {} : zone = {} : pod = {} : workers = {} : host = {} : port = {}", + ObjectUtils.defaultIfNull(_id, "new"), _uuid, _name, getResourceName(), + _shell.getZone(), _shell.getPod(), _shell.getWorkers(), host, _shell.getPort()); } public String getVersion() { @@ -377,11 +382,28 @@ public class Agent implements HandlerFactory, 
IAgentControl, AgentStatusUpdater } public void setId(final Long id) { - logger.debug("Set agent id {}", id); _id = id; _shell.setPersistentProperty(getResourceName(), "id", Long.toString(id)); } + public String getUuid() { + return _uuid; + } + + public void setUuid(String uuid) { + this._uuid = uuid; + _shell.setPersistentProperty(getResourceName(), "uuid", uuid); + } + + public String getName() { + return _name; + } + + public void setName(String name) { + this._name = name; + _shell.setPersistentProperty(getResourceName(), "name", name); + } + private synchronized void scheduleServicesRestartTask() { if (certTimer != null) { certTimer.cancel(); @@ -594,9 +616,12 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater return; } - logger.info("Process agent startup answer, agent id = {}", startup.getHostId()); + logger.info("Process agent startup answer, agent [id: {}, uuid: {}, name: {}] connected to the server", + startup.getHostId(), startup.getHostUuid(), startup.getHostName()); setId(startup.getHostId()); + setUuid(startup.getHostUuid()); + setName(startup.getHostName()); _pingInterval = (long)startup.getPingInterval() * 1000; // change to ms. 
setLastPingResponseTime(); @@ -604,7 +629,8 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater _ugentTaskPool.setKeepAliveTime(2 * _pingInterval, TimeUnit.MILLISECONDS); - logger.info("Startup Response Received: agent id = {}", getId()); + logger.info("Startup Response Received: agent [id: {}, uuid: {}, name: {}]", + startup.getHostId(), startup.getHostUuid(), startup.getHostName()); } protected void processRequest(final Request request, final Link link) { @@ -860,15 +886,17 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater NumbersUtil.enableHumanReadableSizes = humanReadable; } - logger.info("Processing agent ready command, agent id = {}", ready.getHostId()); + logger.info("Processing agent ready command, agent id = {}, uuid = {}, name = {}", ready.getHostId(), ready.getHostUuid(), ready.getHostName()); if (ready.getHostId() != null) { setId(ready.getHostId()); + setUuid(ready.getHostUuid()); + setName(ready.getHostName()); } verifyAgentArch(ready.getArch()); processManagementServerList(ready.getMsHostList(), ready.getLbAlgorithm(), ready.getLbCheckInterval()); - logger.info("Ready command is processed for agent id = {}", getId()); + logger.info("Ready command is processed for agent [id: {}, uuid: {}, name: {}]", getId(), getUuid(), getName()); } private void verifyAgentArch(String arch) { diff --git a/api/src/main/java/com/cloud/agent/api/to/LoadBalancerTO.java b/api/src/main/java/com/cloud/agent/api/to/LoadBalancerTO.java index 966d24886fe..f395f26aeed 100644 --- a/api/src/main/java/com/cloud/agent/api/to/LoadBalancerTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/LoadBalancerTO.java @@ -374,13 +374,15 @@ public class LoadBalancerTO { public static class CounterTO implements Serializable { private static final long serialVersionUID = 2L; private final Long id; + private final String uuid; private final String name; private final Counter.Source source; private final String value; private 
final String provider; - public CounterTO(Long id, String name, Counter.Source source, String value, String provider) { + public CounterTO(Long id, String uuid, String name, Counter.Source source, String value, String provider) { this.id = id; + this.uuid = uuid; this.name = name; this.source = source; this.value = value; @@ -391,6 +393,10 @@ public class LoadBalancerTO { return id; } + public String getUuid() { + return uuid; + } + public String getName() { return name; } @@ -411,12 +417,14 @@ public class LoadBalancerTO { public static class ConditionTO implements Serializable { private static final long serialVersionUID = 2L; private final Long id; + private final String uuid; private final long threshold; private final Condition.Operator relationalOperator; private final CounterTO counter; - public ConditionTO(Long id, long threshold, Condition.Operator relationalOperator, CounterTO counter) { + public ConditionTO(Long id, String uuid, long threshold, Condition.Operator relationalOperator, CounterTO counter) { this.id = id; + this.uuid = uuid; this.threshold = threshold; this.relationalOperator = relationalOperator; this.counter = counter; @@ -426,6 +434,10 @@ public class LoadBalancerTO { return id; } + public String getUuid() { + return uuid; + } + public long getThreshold() { return threshold; } @@ -442,6 +454,7 @@ public class LoadBalancerTO { public static class AutoScalePolicyTO implements Serializable { private static final long serialVersionUID = 2L; private final long id; + private final String uuid; private final int duration; private final int quietTime; private final Date lastQuietTime; @@ -449,8 +462,9 @@ public class LoadBalancerTO { boolean revoked; private final List conditions; - public AutoScalePolicyTO(long id, int duration, int quietTime, Date lastQuietTime, AutoScalePolicy.Action action, List conditions, boolean revoked) { + public AutoScalePolicyTO(long id, String uuid, int duration, int quietTime, Date lastQuietTime, 
AutoScalePolicy.Action action, List conditions, boolean revoked) { this.id = id; + this.uuid = uuid; this.duration = duration; this.quietTime = quietTime; this.lastQuietTime = lastQuietTime; @@ -463,6 +477,10 @@ public class LoadBalancerTO { return id; } + public String getUuid() { + return uuid; + } + public int getDuration() { return duration; } diff --git a/api/src/main/java/com/cloud/agent/api/to/NfsTO.java b/api/src/main/java/com/cloud/agent/api/to/NfsTO.java index 0f6511e8311..eeddbf649a7 100644 --- a/api/src/main/java/com/cloud/agent/api/to/NfsTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/NfsTO.java @@ -17,6 +17,7 @@ package com.cloud.agent.api.to; import com.cloud.storage.DataStoreRole; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class NfsTO implements DataStoreTO { @@ -41,6 +42,13 @@ public class NfsTO implements DataStoreTO { } + @Override + public String toString() { + return String.format("NfsTO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "uuid", "_url", "_role", "nfsVersion")); + } + @Override public String getUrl() { return _url; diff --git a/api/src/main/java/com/cloud/agent/api/to/S3TO.java b/api/src/main/java/com/cloud/agent/api/to/S3TO.java index 233238cf793..936f8168b1e 100644 --- a/api/src/main/java/com/cloud/agent/api/to/S3TO.java +++ b/api/src/main/java/com/cloud/agent/api/to/S3TO.java @@ -22,6 +22,7 @@ import com.cloud.agent.api.LogLevel; import com.cloud.agent.api.LogLevel.Log4jLevel; import com.cloud.storage.DataStoreRole; import com.cloud.utils.storage.S3.ClientOptions; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public final class S3TO implements ClientOptions, DataStoreTO { @@ -68,6 +69,13 @@ public final class S3TO implements ClientOptions, DataStoreTO { } + @Override + public String toString() { + return String.format("S3TO %s", + 
ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "bucketName")); + } + public Long getId() { return this.id; } diff --git a/api/src/main/java/com/cloud/agent/api/to/StorageFilerTO.java b/api/src/main/java/com/cloud/agent/api/to/StorageFilerTO.java index e361e7a141f..cbdb7922eb4 100644 --- a/api/src/main/java/com/cloud/agent/api/to/StorageFilerTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/StorageFilerTO.java @@ -19,6 +19,7 @@ package com.cloud.agent.api.to; import com.cloud.agent.api.LogLevel; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StoragePool; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class StorageFilerTO { long id; @@ -73,6 +74,6 @@ public class StorageFilerTO { @Override public String toString() { - return new StringBuilder("Pool[").append(id).append("|").append(host).append(":").append(port).append("|").append(path).append("]").toString(); + return String.format("Pool %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "host", "port", "path")); } } diff --git a/api/src/main/java/com/cloud/agent/api/to/SwiftTO.java b/api/src/main/java/com/cloud/agent/api/to/SwiftTO.java index b89dfea40e0..14038566fbd 100644 --- a/api/src/main/java/com/cloud/agent/api/to/SwiftTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/SwiftTO.java @@ -18,6 +18,7 @@ package com.cloud.agent.api.to; import com.cloud.storage.DataStoreRole; import com.cloud.utils.SwiftUtil; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class SwiftTO implements DataStoreTO, SwiftUtil.SwiftClientCfg { Long id; @@ -41,6 +42,13 @@ public class SwiftTO implements DataStoreTO, SwiftUtil.SwiftClientCfg { this.storagePolicy = storagePolicy; } + @Override + public String toString() { + return String.format("SwiftTO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, 
"id", "account", "userName")); + } + public Long getId() { return id; } diff --git a/api/src/main/java/com/cloud/network/Ipv6Service.java b/api/src/main/java/com/cloud/network/Ipv6Service.java index 2b4dff01086..4ef5f98c38d 100644 --- a/api/src/main/java/com/cloud/network/Ipv6Service.java +++ b/api/src/main/java/com/cloud/network/Ipv6Service.java @@ -58,7 +58,7 @@ public interface Ipv6Service extends PluggableService, Configurable { Pair getUsedTotalIpv6SubnetForZone(long zoneId); - Pair preAllocateIpv6SubnetForNetwork(long zoneId) throws ResourceAllocationException; + Pair preAllocateIpv6SubnetForNetwork(DataCenter zone) throws ResourceAllocationException; void assignIpv6SubnetToNetwork(String subnet, long networkId); diff --git a/api/src/main/java/com/cloud/network/NetworkProfile.java b/api/src/main/java/com/cloud/network/NetworkProfile.java index 83dc247cc9e..2e8efb48930 100644 --- a/api/src/main/java/com/cloud/network/NetworkProfile.java +++ b/api/src/main/java/com/cloud/network/NetworkProfile.java @@ -22,6 +22,7 @@ import java.util.Date; import com.cloud.network.Networks.BroadcastDomainType; import com.cloud.network.Networks.Mode; import com.cloud.network.Networks.TrafficType; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class NetworkProfile implements Network { private final long id; @@ -384,4 +385,11 @@ public class NetworkProfile implements Network { return networkCidrSize; } + @Override + public String toString() { + return String.format("NetworkProfile %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "networkOfferingId")); + } + } diff --git a/api/src/main/java/com/cloud/network/lb/LoadBalancingRule.java b/api/src/main/java/com/cloud/network/lb/LoadBalancingRule.java index 64b2aeedf12..e4cf4ec526f 100644 --- a/api/src/main/java/com/cloud/network/lb/LoadBalancingRule.java +++ b/api/src/main/java/com/cloud/network/lb/LoadBalancingRule.java @@ -63,6 
+63,10 @@ public class LoadBalancingRule { return lb.getId(); } + public LoadBalancer getLb() { + return lb; + } + public String getName() { return lb.getName(); } diff --git a/api/src/main/java/com/cloud/network/vpn/RemoteAccessVpnService.java b/api/src/main/java/com/cloud/network/vpn/RemoteAccessVpnService.java index bbb9771d27a..ffa8af4576d 100644 --- a/api/src/main/java/com/cloud/network/vpn/RemoteAccessVpnService.java +++ b/api/src/main/java/com/cloud/network/vpn/RemoteAccessVpnService.java @@ -39,7 +39,7 @@ public interface RemoteAccessVpnService { VpnUser addVpnUser(long vpnOwnerId, String userName, String password); - boolean removeVpnUser(long vpnOwnerId, String userName, Account caller); + boolean removeVpnUser(Account vpnOwner, String userName, Account caller); List listVpnUsers(long vpnOwnerId, String userName); diff --git a/api/src/main/java/com/cloud/region/ha/GlobalLoadBalancingRulesService.java b/api/src/main/java/com/cloud/region/ha/GlobalLoadBalancingRulesService.java index ab6e6fb6c5a..3b61367e3b4 100644 --- a/api/src/main/java/com/cloud/region/ha/GlobalLoadBalancingRulesService.java +++ b/api/src/main/java/com/cloud/region/ha/GlobalLoadBalancingRulesService.java @@ -19,6 +19,7 @@ package com.cloud.region.ha; import java.util.List; +import com.cloud.user.Account; import org.apache.cloudstack.api.command.user.region.ha.gslb.AssignToGlobalLoadBalancerRuleCmd; import org.apache.cloudstack.api.command.user.region.ha.gslb.CreateGlobalLoadBalancerRuleCmd; import org.apache.cloudstack.api.command.user.region.ha.gslb.DeleteGlobalLoadBalancerRuleCmd; @@ -39,7 +40,7 @@ public interface GlobalLoadBalancingRulesService { GlobalLoadBalancerRule updateGlobalLoadBalancerRule(UpdateGlobalLoadBalancerRuleCmd updateGslbCmd); - boolean revokeAllGslbRulesForAccount(com.cloud.user.Account caller, long accountId) throws com.cloud.exception.ResourceUnavailableException; + boolean revokeAllGslbRulesForAccount(com.cloud.user.Account caller, Account account) throws 
com.cloud.exception.ResourceUnavailableException; /* * methods for managing sites participating in global load balancing diff --git a/api/src/main/java/com/cloud/storage/StorageStats.java b/api/src/main/java/com/cloud/storage/StorageStats.java index a474b23489c..502e2aaae40 100644 --- a/api/src/main/java/com/cloud/storage/StorageStats.java +++ b/api/src/main/java/com/cloud/storage/StorageStats.java @@ -26,4 +26,7 @@ public interface StorageStats { * @return bytes capacity of the storage server */ public long getCapacityBytes(); + + Long getCapacityIops(); + Long getUsedIops(); } diff --git a/api/src/main/java/com/cloud/vm/NicProfile.java b/api/src/main/java/com/cloud/vm/NicProfile.java index 183c8dcb2d5..a0c80ceb1bf 100644 --- a/api/src/main/java/com/cloud/vm/NicProfile.java +++ b/api/src/main/java/com/cloud/vm/NicProfile.java @@ -450,6 +450,9 @@ public class NicProfile implements InternalIdentity, Serializable { @Override public String toString() { - return String.format("NicProfile %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "vmId", "deviceId", "broadcastUri", "reservationId", "iPv4Address")); + return String.format("NicProfile %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "vmId", "deviceId", + "broadcastUri", "reservationId", "iPv4Address")); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index bf8b79b29d0..cf03f1d2699 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -509,6 +509,7 @@ public class ApiConstants { public static final String URL = "url"; public static final String USAGE_INTERFACE = "usageinterface"; public static final String USED_SUBNETS = "usedsubnets"; + public static final String USED_IOPS = "usediops"; public static final String USER_DATA = "userdata"; public static final String 
USER_DATA_NAME = "userdataname"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostCmd.java index 88eeadb9b13..397f9c80735 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostCmd.java @@ -125,8 +125,9 @@ public class UpdateHostCmd extends BaseCmd { hostResponse.setResponseName(getCommandName()); this.setResponseObject(hostResponse); } catch (Exception e) { - logger.debug("Failed to update host:" + getId(), e); - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to update host:" + getId() + "," + e.getMessage()); + Host host = _entityMgr.findById(Host.class, getId()); + logger.debug("Failed to update host: {} with id {}", host, getId(), e); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to update host: %s with id %d, %s", host, getId(), e.getMessage())); } } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java index 6bebdc09f59..cdd908dfb87 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java @@ -124,10 +124,10 @@ public class CreateSnapshotFromVMSnapshotCmd extends BaseAsyncCreateCmd { if (account.getType() == Account.Type.PROJECT) { Project project = _projectService.findByProjectAccountId(vmsnapshot.getAccountId()); if (project == null) { - throw new InvalidParameterValueException("Unable to find project by account id=" + account.getUuid()); + throw new InvalidParameterValueException(String.format("Unable to find project by account %s", 
account)); } if (project.getState() != Project.State.Active) { - throw new PermissionDeniedException("Can't add resources to the project id=" + project.getUuid() + " in state=" + project.getState() + " as it's no longer active"); + throw new PermissionDeniedException(String.format("Can't add resources to the project %s in state=%s as it's no longer active", project, project.getState())); } } else if (account.getState() == Account.State.DISABLED) { throw new PermissionDeniedException("The owner of template is disabled: " + account); @@ -164,8 +164,9 @@ public class CreateSnapshotFromVMSnapshotCmd extends BaseAsyncCreateCmd { @Override public void execute() { - logger.info("CreateSnapshotFromVMSnapshotCmd with vm snapshot id:" + getVMSnapshotId() + " and snapshot id:" + getEntityId() + " starts:" + System.currentTimeMillis()); - CallContext.current().setEventDetails("Vm Snapshot Id: "+ this._uuidMgr.getUuid(VMSnapshot.class, getVMSnapshotId())); + VMSnapshot vmSnapshot = _vmSnapshotService.getVMSnapshotById(getVMSnapshotId()); + logger.info("CreateSnapshotFromVMSnapshotCmd with vm snapshot {} with id {} and snapshot [id: {}, uuid: {}]", vmSnapshot, getVMSnapshotId(), getEntityId(), getEntityUuid()); + CallContext.current().setEventDetails("Vm Snapshot Id: " + vmSnapshot.getUuid()); Snapshot snapshot = null; try { snapshot = _snapshotService.backupSnapshotFromVmSnapshot(getEntityId(), getVmId(), getVolumeId(), getVMSnapshotId()); @@ -174,19 +175,19 @@ public class CreateSnapshotFromVMSnapshotCmd extends BaseAsyncCreateCmd { response.setResponseName(getCommandName()); this.setResponseObject(response); } else { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create snapshot due to an internal error creating snapshot from vm snapshot " + getVMSnapshotId()); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to create snapshot due to an internal error creating snapshot from vm snapshot %s", vmSnapshot)); } } catch 
(InvalidParameterValueException ex) { throw ex; } catch (Exception e) { logger.debug("Failed to create snapshot", e); - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create snapshot due to an internal error creating snapshot from vm snapshot " + getVMSnapshotId()); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to create snapshot due to an internal error creating snapshot from vm snapshot %s", vmSnapshot)); } finally { if (snapshot == null) { try { _snapshotService.deleteSnapshot(getEntityId(), null); } catch (Exception e) { - logger.debug("Failed to clean failed snapshot" + getEntityId()); + logger.debug("Failed to clean failed snapshot {} with id {}", () -> _entityMgr.findById(Snapshot.class, getEntityId()), this::getEntityId); } } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java index 48e7a9ee519..0697987b04d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java @@ -104,7 +104,7 @@ public class RemoveVpnUserCmd extends BaseAsyncCmd { public void execute() { Account owner = _accountService.getAccount(getEntityOwnerId()); long ownerId = owner.getId(); - boolean result = _ravService.removeVpnUser(ownerId, userName, CallContext.current().getCallingAccount()); + boolean result = _ravService.removeVpnUser(owner, userName, CallContext.current().getCallingAccount()); if (!result) { String errorMessage = String.format("Failed to remove VPN user=[%s]. 
VPN owner id=[%s].", userName, ownerId); logger.error(errorMessage); diff --git a/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java index 06d5103d731..676803ea86b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java @@ -97,6 +97,10 @@ public class StoragePoolResponse extends BaseResponseWithAnnotations { @Param(description = "total min IOPS currently in use by volumes") private Long allocatedIops; + @SerializedName(ApiConstants.USED_IOPS) + @Param(description = "total IOPS currently in use", since = "4.20.1") + private Long usedIops; + @SerializedName(ApiConstants.STORAGE_CUSTOM_STATS) @Param(description = "the storage pool custom stats", since = "4.18.1") private Map customStats; @@ -312,6 +316,14 @@ public class StoragePoolResponse extends BaseResponseWithAnnotations { this.allocatedIops = allocatedIops; } + public Long getUsedIops() { + return usedIops; + } + + public void setUsedIops(Long usedIops) { + this.usedIops = usedIops; + } + public Map getCustomStats() { return customStats; } diff --git a/api/src/main/java/org/apache/cloudstack/cluster/ClusterDrsAlgorithm.java b/api/src/main/java/org/apache/cloudstack/cluster/ClusterDrsAlgorithm.java index 15f7fcd8174..665f95842b0 100644 --- a/api/src/main/java/org/apache/cloudstack/cluster/ClusterDrsAlgorithm.java +++ b/api/src/main/java/org/apache/cloudstack/cluster/ClusterDrsAlgorithm.java @@ -21,6 +21,7 @@ package org.apache.cloudstack.cluster; import com.cloud.host.Host; import com.cloud.offering.ServiceOffering; +import com.cloud.org.Cluster; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; import com.cloud.utils.component.Adapter; @@ -55,8 +56,8 @@ public interface ClusterDrsAlgorithm extends Adapter { * @throws ConfigurationException * if there is an error in the 
configuration */ - boolean needsDrs(long clusterId, List> cpuList, - List> memoryList) throws ConfigurationException; + boolean needsDrs(Cluster cluster, List> cpuList, + List> memoryList) throws ConfigurationException; /** @@ -79,7 +80,7 @@ public interface ClusterDrsAlgorithm extends Adapter { * * @return a ternary containing improvement, cost, benefit */ - Ternary getMetrics(long clusterId, VirtualMachine vm, ServiceOffering serviceOffering, + Ternary getMetrics(Cluster cluster, VirtualMachine vm, ServiceOffering serviceOffering, Host destHost, Map> hostCpuMap, Map> hostMemoryMap, Boolean requiresStorageMotion) throws ConfigurationException; diff --git a/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java b/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java index 5697a040b81..0802098cb4f 100644 --- a/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java +++ b/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java @@ -17,7 +17,7 @@ package org.apache.cloudstack.vm; -import static com.cloud.utils.NumbersUtil.toHumanReadableSize; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import java.util.List; @@ -179,6 +179,13 @@ public class UnmanagedInstanceTO { this.vncPassword = vncPassword; } + @Override + public String toString() { + return String.format("UnmanagedInstanceTO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "name", "internalCSName", "hostName", "clusterName")); + } + public static class Disk { private String diskId; @@ -322,12 +329,9 @@ public class UnmanagedInstanceTO { @Override public String toString() { - return "Disk {" + - "diskId='" + diskId + '\'' + - ", capacity=" + toHumanReadableSize(capacity) + - ", controller='" + controller + '\'' + - ", controllerUnit=" + controllerUnit + - "}"; + return String.format("Disk %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "diskId", "internalCSName", 
"controller", "controllerUnit")); } } @@ -424,11 +428,9 @@ public class UnmanagedInstanceTO { @Override public String toString() { - return "Nic{" + - "nicId='" + nicId + '\'' + - ", adapterType='" + adapterType + '\'' + - ", macAddress='" + macAddress + '\'' + - "}"; + return String.format("Nic %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "nicId", "adapterType", "macAddress")); } } } diff --git a/api/src/test/java/com/cloud/agent/api/to/LoadBalancerTOTest.java b/api/src/test/java/com/cloud/agent/api/to/LoadBalancerTOTest.java index b12c1b81d4a..e7ecbebae7b 100644 --- a/api/src/test/java/com/cloud/agent/api/to/LoadBalancerTOTest.java +++ b/api/src/test/java/com/cloud/agent/api/to/LoadBalancerTOTest.java @@ -41,16 +41,19 @@ public class LoadBalancerTOTest { LoadBalancerTO.AutoScaleVmGroupTO vmGroup; private static final Long counterId = 1L; + private static final String counterUuid = "1111-1111-1100"; private static final String counterName = "counter name"; private static final Counter.Source counterSource = Counter.Source.CPU; private static final String counterValue = "counter value"; private static final String counterProvider = "VIRTUALROUTER"; private static final Long conditionId = 2L; + private static final String conditionUuid = "1111-1111-1110"; private static final Long threshold = 100L; private static final Condition.Operator relationalOperator = Condition.Operator.GT; private static final Long scaleUpPolicyId = 11L; + private static final String scaleUpPolicyUuid = "1111-1111-1111"; private static final int scaleUpPolicyDuration = 61; private static final int scaleUpPolicyQuietTime = 31; private static final Date scaleUpPolicyLastQuietTime = new Date(); @@ -85,14 +88,14 @@ public class LoadBalancerTOTest { @Before public void setUp() { - counter = new LoadBalancerTO.CounterTO(counterId, counterName, counterSource, counterValue, counterProvider); - condition = new LoadBalancerTO.ConditionTO(conditionId, threshold, 
relationalOperator, counter); - scaleUpPolicy = new LoadBalancerTO.AutoScalePolicyTO(scaleUpPolicyId, scaleUpPolicyDuration, scaleUpPolicyQuietTime, - scaleUpPolicyLastQuietTime, AutoScalePolicy.Action.SCALEUP, - Arrays.asList(new LoadBalancerTO.ConditionTO[]{ condition }), false); - scaleDownPolicy = new LoadBalancerTO.AutoScalePolicyTO(scaleDownPolicyId, scaleDownPolicyDuration, scaleDownPolicyQuietTime, - scaleDownPolicyLastQuietTime, AutoScalePolicy.Action.SCALEDOWN, - Arrays.asList(new LoadBalancerTO.ConditionTO[]{ condition }), false); + counter = new LoadBalancerTO.CounterTO(counterId, counterUuid, counterName, counterSource, counterValue, counterProvider); + condition = new LoadBalancerTO.ConditionTO(conditionId, conditionUuid, threshold, relationalOperator, counter); + scaleUpPolicy = new LoadBalancerTO.AutoScalePolicyTO(scaleUpPolicyId, scaleUpPolicyUuid, scaleUpPolicyDuration, + scaleUpPolicyQuietTime, scaleUpPolicyLastQuietTime, + AutoScalePolicy.Action.SCALEUP, Arrays.asList(new LoadBalancerTO.ConditionTO[]{ condition }), false); + scaleDownPolicy = new LoadBalancerTO.AutoScalePolicyTO(scaleDownPolicyId, scaleUpPolicyUuid, scaleDownPolicyDuration, + scaleDownPolicyQuietTime, scaleDownPolicyLastQuietTime, + AutoScalePolicy.Action.SCALEDOWN, Arrays.asList(new LoadBalancerTO.ConditionTO[]{ condition }), false); vmProfile = new LoadBalancerTO.AutoScaleVmProfileTO(zoneId, domainId, cloudStackApiUrl, autoScaleUserApiKey, autoScaleUserSecretKey, serviceOfferingId, templateId, vmName, networkId, otherDeployParams, counterParamList, expungeVmGracePeriod); @@ -113,6 +116,7 @@ public class LoadBalancerTOTest { @Test public void testConditionTO() { Assert.assertEquals(conditionId, condition.getId()); + Assert.assertEquals(conditionUuid, condition.getUuid()); Assert.assertEquals((long) threshold, condition.getThreshold()); Assert.assertEquals(relationalOperator, condition.getRelationalOperator()); Assert.assertEquals(counter, condition.getCounter()); diff --git 
a/core/src/main/java/com/cloud/agent/api/GetStorageStatsAnswer.java b/core/src/main/java/com/cloud/agent/api/GetStorageStatsAnswer.java index 26e7b749586..79753661066 100644 --- a/core/src/main/java/com/cloud/agent/api/GetStorageStatsAnswer.java +++ b/core/src/main/java/com/cloud/agent/api/GetStorageStatsAnswer.java @@ -27,24 +27,46 @@ public class GetStorageStatsAnswer extends Answer implements StorageStats { protected GetStorageStatsAnswer() { } - protected long used; + protected long usedBytes; - protected long capacity; + protected long capacityBytes; + + protected Long capacityIops; + + protected Long usedIops; @Override public long getByteUsed() { - return used; + return usedBytes; } @Override public long getCapacityBytes() { - return capacity; + return capacityBytes; } - public GetStorageStatsAnswer(GetStorageStatsCommand cmd, long capacity, long used) { + @Override + public Long getCapacityIops() { + return capacityIops; + } + + @Override + public Long getUsedIops() { + return usedIops; + } + + public GetStorageStatsAnswer(GetStorageStatsCommand cmd, long capacityBytes, long usedBytes) { super(cmd, true, null); - this.capacity = capacity; - this.used = used; + this.capacityBytes = capacityBytes; + this.usedBytes = usedBytes; + } + + public GetStorageStatsAnswer(GetStorageStatsCommand cmd, long capacityBytes, long usedBytes, Long capacityIops, Long usedIops) { + super(cmd, true, null); + this.capacityBytes = capacityBytes; + this.usedBytes = usedBytes; + this.capacityIops = capacityIops; + this.usedIops = usedIops; } public GetStorageStatsAnswer(GetStorageStatsCommand cmd, String details) { diff --git a/core/src/main/java/com/cloud/agent/api/ReadyCommand.java b/core/src/main/java/com/cloud/agent/api/ReadyCommand.java index 42f1d264a50..e2d974e3878 100644 --- a/core/src/main/java/com/cloud/agent/api/ReadyCommand.java +++ b/core/src/main/java/com/cloud/agent/api/ReadyCommand.java @@ -19,6 +19,8 @@ package com.cloud.agent.api; +import com.cloud.host.Host; + 
import java.util.List; public class ReadyCommand extends Command { @@ -30,6 +32,8 @@ public class ReadyCommand extends Command { private Long dcId; private Long hostId; + private String hostUuid; + private String hostName; private List msHostList; private String lbAlgorithm; private Long lbCheckInterval; @@ -41,9 +45,11 @@ public class ReadyCommand extends Command { this.dcId = dcId; } - public ReadyCommand(final Long dcId, final Long hostId, boolean enableHumanReadableSizes) { - this(dcId); - this.hostId = hostId; + public ReadyCommand(final Host host, boolean enableHumanReadableSizes) { + this(host.getDataCenterId()); + this.hostId = host.getId(); + this.hostUuid = host.getUuid(); + this.hostName = host.getName(); this.enableHumanReadableSizes = enableHumanReadableSizes; } @@ -68,6 +74,14 @@ public class ReadyCommand extends Command { return hostId; } + public String getHostUuid() { + return hostUuid; + } + + public String getHostName() { + return hostName; + } + public List getMsHostList() { return msHostList; } diff --git a/core/src/main/java/com/cloud/agent/api/StartupAnswer.java b/core/src/main/java/com/cloud/agent/api/StartupAnswer.java index 71652269b66..c619ce75ace 100644 --- a/core/src/main/java/com/cloud/agent/api/StartupAnswer.java +++ b/core/src/main/java/com/cloud/agent/api/StartupAnswer.java @@ -21,14 +21,18 @@ package com.cloud.agent.api; public class StartupAnswer extends Answer { long hostId; + String hostName; + String hostUuid; int pingInterval; protected StartupAnswer() { } - public StartupAnswer(StartupCommand cmd, long hostId, int pingInterval) { + public StartupAnswer(StartupCommand cmd, long hostId, String hostUuid, String hostName, int pingInterval) { super(cmd); this.hostId = hostId; + this.hostUuid = hostUuid; + this.hostName = hostName; this.pingInterval = pingInterval; } @@ -40,6 +44,14 @@ public class StartupAnswer extends Answer { return hostId; } + public String getHostUuid() { + return hostUuid; + } + + public String getHostName() 
{ + return hostName; + } + public int getPingInterval() { return pingInterval; } diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/ImageStoreTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/ImageStoreTO.java index 046a2ab9410..4bf29205673 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/ImageStoreTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/ImageStoreTO.java @@ -23,6 +23,7 @@ import org.apache.cloudstack.storage.image.datastore.ImageStoreInfo; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.storage.DataStoreRole; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class ImageStoreTO implements DataStoreTO { private String type; @@ -78,15 +79,9 @@ public class ImageStoreTO implements DataStoreTO { @Override public String toString() { - return new StringBuilder("ImageStoreTO[type=").append(type) - .append("|provider=") - .append(providerName) - .append("|role=") - .append(role) - .append("|uri=") - .append(uri) - .append("]") - .toString(); + return String.format("ImageStoreTO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "uuid", "type", "providerName", "role", "uri")); } @Override diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java index a6a74176c13..2c758fa5087 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java @@ -26,6 +26,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.storage.DataStoreRole; import com.cloud.storage.Storage.StoragePoolType; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class PrimaryDataStoreTO implements 
DataStoreTO { public static final String MANAGED = PrimaryDataStore.MANAGED; @@ -145,15 +146,9 @@ public class PrimaryDataStoreTO implements DataStoreTO { @Override public String toString() { - return new StringBuilder("PrimaryDataStoreTO[uuid=").append(uuid) - .append("|name=") - .append(name) - .append("|id=") - .append(id) - .append("|pooltype=") - .append(poolType) - .append("]") - .toString(); + return String.format("PrimaryDataStoreTO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "poolType")); } public Boolean isFullCloneFlag() { diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java index eafe8f83269..dc68b31a3fd 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java @@ -27,6 +27,7 @@ import com.cloud.agent.api.to.DataTO; import com.cloud.hypervisor.Hypervisor; import com.cloud.storage.Storage.ImageFormat; import com.cloud.template.VirtualMachineTemplate; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class TemplateObjectTO extends DownloadableObjectTO implements DataTO { private String path; @@ -264,6 +265,8 @@ public class TemplateObjectTO extends DownloadableObjectTO implements DataTO { @Override public String toString() { - return new StringBuilder("TemplateTO[id=").append(id).append("|origUrl=").append(origUrl).append("|name").append(name).append("]").toString(); + return String.format("TemplateTO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "origUrl")); } } diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java index 6514038ac62..4d1d0bf9097 100644 --- 
a/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java @@ -30,6 +30,7 @@ import com.cloud.offering.DiskOffering.DiskCacheMode; import com.cloud.storage.MigrationOptions; import com.cloud.storage.Storage; import com.cloud.storage.Volume; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import java.util.Arrays; @@ -258,7 +259,9 @@ public class VolumeObjectTO extends DownloadableObjectTO implements DataTO { @Override public String toString() { - return new StringBuilder("volumeTO[uuid=").append(uuid).append("|path=").append(path).append("|datastore=").append(dataStore).append("]").toString(); + return String.format("volumeTO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "path", "dataStore")); } public void setBytesReadRate(Long bytesReadRate) { diff --git a/core/src/test/java/com/cloud/agent/api/GetStorageStatsAnswerTest.java b/core/src/test/java/com/cloud/agent/api/GetStorageStatsAnswerTest.java new file mode 100644 index 00000000000..44af83ada2d --- /dev/null +++ b/core/src/test/java/com/cloud/agent/api/GetStorageStatsAnswerTest.java @@ -0,0 +1,81 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.agent.api; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class GetStorageStatsAnswerTest { + + @Test + public void testDefaultConstructor() { + GetStorageStatsAnswer answer = new GetStorageStatsAnswer(); + + Assert.assertEquals(0, answer.getByteUsed()); + Assert.assertEquals(0, answer.getCapacityBytes()); + Assert.assertNull(answer.getCapacityIops()); + Assert.assertNull(answer.getUsedIops()); + } + + @Test + public void testConstructorWithCapacityAndUsedBytes() { + GetStorageStatsCommand mockCmd = new GetStorageStatsCommand(); + long capacityBytes = 1024L; + long usedBytes = 512L; + + GetStorageStatsAnswer answer = new GetStorageStatsAnswer(mockCmd, capacityBytes, usedBytes); + + Assert.assertEquals(capacityBytes, answer.getCapacityBytes()); + Assert.assertEquals(usedBytes, answer.getByteUsed()); + Assert.assertNull(answer.getCapacityIops()); + Assert.assertNull(answer.getUsedIops()); + } + + @Test + public void testConstructorWithIops() { + GetStorageStatsCommand mockCmd = new GetStorageStatsCommand(); + long capacityBytes = 2048L; + long usedBytes = 1024L; + Long capacityIops = 1000L; + Long usedIops = 500L; + + GetStorageStatsAnswer answer = new GetStorageStatsAnswer(mockCmd, capacityBytes, usedBytes, capacityIops, usedIops); + + Assert.assertEquals(capacityBytes, answer.getCapacityBytes()); + Assert.assertEquals(usedBytes, answer.getByteUsed()); + Assert.assertEquals(capacityIops, answer.getCapacityIops()); + Assert.assertEquals(usedIops, answer.getUsedIops()); + } + + @Test + public void testErrorConstructor() { + GetStorageStatsCommand mockCmd = new GetStorageStatsCommand(); + String errorDetails = "An error occurred"; + + GetStorageStatsAnswer answer = new GetStorageStatsAnswer(mockCmd, errorDetails); + 
+ Assert.assertFalse(answer.getResult()); + Assert.assertEquals(errorDetails, answer.getDetails()); + Assert.assertEquals(0, answer.getCapacityBytes()); + Assert.assertEquals(0, answer.getByteUsed()); + Assert.assertNull(answer.getCapacityIops()); + Assert.assertNull(answer.getUsedIops()); + } +} diff --git a/debian/control b/debian/control index c0cb95af035..a773844c27c 100644 --- a/debian/control +++ b/debian/control @@ -24,7 +24,7 @@ Description: CloudStack server library Package: cloudstack-agent Architecture: all -Depends: ${python:Depends}, ${python3:Depends}, openjdk-17-jre-headless | java17-runtime-headless | java17-runtime | zulu-17, cloudstack-common (= ${source:Version}), lsb-base (>= 9), openssh-client, qemu-kvm (>= 2.5) | qemu-system-x86 (>= 5.2), libvirt-bin (>= 1.3) | libvirt-daemon-system (>= 3.0), iproute2, ebtables, vlan, ipset, python3-libvirt, ethtool, iptables, cryptsetup, rng-tools, rsync, lsb-release, ufw, apparmor, cpu-checker, libvirt-daemon-driver-storage-rbd +Depends: ${python:Depends}, ${python3:Depends}, openjdk-17-jre-headless | java17-runtime-headless | java17-runtime | zulu-17, cloudstack-common (= ${source:Version}), lsb-base (>= 9), openssh-client, qemu-kvm (>= 2.5) | qemu-system-x86 (>= 5.2), libvirt-bin (>= 1.3) | libvirt-daemon-system (>= 3.0), iproute2, ebtables, vlan, ipset, python3-libvirt, ethtool, iptables, cryptsetup, rng-tools, rsync, lsb-release, ufw, apparmor, cpu-checker, libvirt-daemon-driver-storage-rbd, sysstat Recommends: init-system-helpers Conflicts: cloud-agent, cloud-agent-libs, cloud-agent-deps, cloud-agent-scripts Description: CloudStack agent diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/EndPoint.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/EndPoint.java index 254c91d3544..df78928ddc3 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/EndPoint.java +++ 
b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/EndPoint.java @@ -24,6 +24,8 @@ import com.cloud.agent.api.Command; public interface EndPoint { long getId(); + String getUuid(); + String getHostAddr(); String getPublicAddr(); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/HypervisorHostListener.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/HypervisorHostListener.java index 6ac4030e1a6..6b9a48b5a53 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/HypervisorHostListener.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/HypervisorHostListener.java @@ -19,12 +19,22 @@ package org.apache.cloudstack.engine.subsystem.api.storage; import com.cloud.exception.StorageConflictException; +import com.cloud.host.Host; +import com.cloud.storage.StoragePool; public interface HypervisorHostListener { boolean hostAdded(long hostId); + default boolean hostConnect(Host host, StoragePool pool) throws StorageConflictException { + return hostConnect(host.getId(), pool.getId()); + } + boolean hostConnect(long hostId, long poolId) throws StorageConflictException; + default boolean hostDisconnected(Host host, StoragePool pool) throws StorageConflictException { + return hostDisconnected(host.getId(), pool.getId()); + } + boolean hostDisconnected(long hostId, long poolId); boolean hostAboutToBeRemoved(long hostId); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java index 2011b1f08fb..c8d9015af90 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java @@ -111,6 +111,14 @@ 
public interface PrimaryDataStoreDriver extends DataStoreDriver { */ Pair getStorageStats(StoragePool storagePool); + /** + * Intended for managed storage + * returns the capacity and used IOPS or null if not supported + */ + default Pair getStorageIopsStats(StoragePool storagePool) { + return null; + } + /** * intended for managed storage * returns true if the storage can provide the volume stats (physical and virtual size) diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java index 3bd3100e84e..1f7bf45a15a 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java @@ -22,6 +22,8 @@ import com.cloud.template.VirtualMachineTemplate; import com.cloud.user.UserData; public interface TemplateInfo extends DownloadableDataInfo, VirtualMachineTemplate { + VirtualMachineTemplate getImage(); + @Override String getUniqueName(); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java index df13f951a44..115cf024617 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.engine.subsystem.api.storage; import com.cloud.agent.api.to.DatadiskTO; +import com.cloud.template.VirtualMachineTemplate; import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.storage.command.CommandResult; @@ -60,7 +61,7 @@ public interface TemplateService { 
AsyncCallFuture deleteTemplateOnPrimary(TemplateInfo template, StoragePool pool); - void syncTemplateToRegionStore(long templateId, DataStore store); + void syncTemplateToRegionStore(VirtualMachineTemplate template, DataStore store); void handleSysTemplateDownload(HypervisorType hostHyper, Long dcId); diff --git a/engine/components-api/src/main/java/com/cloud/agent/AgentManager.java b/engine/components-api/src/main/java/com/cloud/agent/AgentManager.java index 2182dfc542d..81525ca13f1 100644 --- a/engine/components-api/src/main/java/com/cloud/agent/AgentManager.java +++ b/engine/components-api/src/main/java/com/cloud/agent/AgentManager.java @@ -50,6 +50,10 @@ public interface AgentManager { ConfigKey ReadyCommandWait = new ConfigKey("Advanced", Integer.class, "ready.command.wait", "60", "Time in seconds to wait for Ready command to return", true); + ConfigKey GranularWaitTimeForCommands = new ConfigKey<>("Advanced", String.class, "commands.timeout", "", + "This timeout overrides the wait global config. This holds a comma separated key value pairs containing timeout (in seconds) for specific commands. " + + "For example: DhcpEntryCommand=600, SavePasswordCommand=300, VmDataCommand=300", false); + public enum TapAgentsAction { Add, Del, Contains, } diff --git a/engine/components-api/src/main/java/com/cloud/agent/Listener.java b/engine/components-api/src/main/java/com/cloud/agent/Listener.java index 843a634b4c0..ceba5f34b82 100644 --- a/engine/components-api/src/main/java/com/cloud/agent/Listener.java +++ b/engine/components-api/src/main/java/com/cloud/agent/Listener.java @@ -43,6 +43,10 @@ public interface Listener { */ boolean processAnswers(long agentId, long seq, Answer[] answers); + default boolean processAnswers(long agentId, String uuid, String name, long seq, Answer[] answers) { + return processAnswers(agentId, seq, answers); + } + /** * This method is called by the AgentManager when an agent sent * a command to the server. 
In order to process these commands, @@ -92,6 +96,10 @@ public interface Listener { */ boolean processDisconnect(long agentId, Status state); + default boolean processDisconnect(long agentId, String uuid, String name, Status state) { + return processDisconnect(agentId, state); + } + /** * This method is called by AgentManager when a host is about to be removed from a cluster. * @param long the ID of the host that's about to be removed diff --git a/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java b/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java index e1bb10f5d26..cbd137e8682 100644 --- a/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java +++ b/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java @@ -140,7 +140,7 @@ public interface CapacityManager { * @param ram required RAM * @param cpuOverprovisioningFactor factor to apply to the actual host cpu */ - boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOverprovisioningFactor, float memoryOvercommitRatio, + boolean checkIfHostHasCapacity(Host host, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOverprovisioningFactor, float memoryOvercommitRatio, boolean considerReservedCapacity); void updateCapacityForHost(Host host); diff --git a/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java b/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java index 01fd54430d6..1694b19c33f 100644 --- a/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java +++ b/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java @@ -238,7 +238,7 @@ public interface ConfigurationManager { * @param domainId * @return success/failure */ - boolean releaseDomainSpecificVirtualRanges(long domainId); + boolean releaseDomainSpecificVirtualRanges(Domain 
domain); /** * Release dedicated virtual ip ranges of an account. @@ -246,7 +246,7 @@ public interface ConfigurationManager { * @param accountId * @return success/failure */ - boolean releaseAccountSpecificVirtualRanges(long accountId); + boolean releaseAccountSpecificVirtualRanges(Account account); /** * Edits a pod in the database. Will not allow you to edit pods that are being used anywhere in the system. diff --git a/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java b/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java index 36937460b20..b1cad20b19e 100644 --- a/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java +++ b/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java @@ -19,6 +19,7 @@ package com.cloud.network; import java.util.Date; import java.util.List; +import com.cloud.user.User; import org.apache.cloudstack.api.response.AcquirePodIpCmdResponse; import org.apache.cloudstack.framework.config.ConfigKey; @@ -88,7 +89,7 @@ public interface IpAddressManager { * @param caller * @return true if it did; false if it didn't */ - boolean disassociatePublicIpAddress(long id, long userId, Account caller); + boolean disassociatePublicIpAddress(IpAddress ipAddress, long userId, Account caller); boolean applyRules(List rules, FirewallRule.Purpose purpose, NetworkRuleApplier applier, boolean continueOnError) throws ResourceUnavailableException; @@ -191,7 +192,7 @@ public interface IpAddressManager { PublicIp assignDedicateIpAddress(Account owner, Long guestNtwkId, Long vpcId, long dcId, boolean isSourceNat) throws ConcurrentOperationException, InsufficientAddressCapacityException; - IpAddress allocateIp(Account ipOwner, boolean isSystem, Account caller, long callerId, DataCenter zone, Boolean displayIp, String ipaddress) + IpAddress allocateIp(Account ipOwner, boolean isSystem, Account caller, User callerUser, DataCenter zone, Boolean displayIp, String ipaddress) throws 
ConcurrentOperationException, ResourceAllocationException, InsufficientAddressCapacityException; PublicIp assignPublicIpAddressFromVlans(long dcId, Long podId, Account owner, VlanType type, List vlanDbIds, Long networkId, String requestedIp, String requestedGateway, boolean isSystem) diff --git a/engine/components-api/src/main/java/com/cloud/network/lb/LoadBalancingRulesManager.java b/engine/components-api/src/main/java/com/cloud/network/lb/LoadBalancingRulesManager.java index d61b446cad7..669456cbdcc 100644 --- a/engine/components-api/src/main/java/com/cloud/network/lb/LoadBalancingRulesManager.java +++ b/engine/components-api/src/main/java/com/cloud/network/lb/LoadBalancingRulesManager.java @@ -62,7 +62,7 @@ public interface LoadBalancingRulesManager { */ boolean removeVmFromLoadBalancers(long vmId); - boolean applyLoadBalancersForNetwork(long networkId, Scheme scheme) throws ResourceUnavailableException; + boolean applyLoadBalancersForNetwork(Network network, Scheme scheme) throws ResourceUnavailableException; String getLBCapability(long networkid, String capabilityName); @@ -74,7 +74,7 @@ public interface LoadBalancingRulesManager { boolean configureLbAutoScaleVmGroup(long vmGroupid, AutoScaleVmGroup.State currentState) throws ResourceUnavailableException; - boolean revokeLoadBalancersForNetwork(long networkId, Scheme scheme) throws ResourceUnavailableException; + boolean revokeLoadBalancersForNetwork(Network network, Scheme scheme) throws ResourceUnavailableException; boolean validateLbRule(LoadBalancingRule lbRule); diff --git a/engine/components-api/src/main/java/com/cloud/network/rules/FirewallManager.java b/engine/components-api/src/main/java/com/cloud/network/rules/FirewallManager.java index 0471086c43d..1a79135f25e 100644 --- a/engine/components-api/src/main/java/com/cloud/network/rules/FirewallManager.java +++ b/engine/components-api/src/main/java/com/cloud/network/rules/FirewallManager.java @@ -20,6 +20,8 @@ import java.util.List; import 
com.cloud.exception.NetworkRuleConflictException; import com.cloud.exception.ResourceUnavailableException; +import com.cloud.network.IpAddress; +import com.cloud.network.Network; import com.cloud.network.dao.IPAddressVO; import com.cloud.network.firewall.FirewallService; import com.cloud.network.rules.FirewallRule.FirewallRuleType; @@ -53,7 +55,7 @@ public interface FirewallManager extends FirewallService { public void revokeRule(FirewallRuleVO rule, Account caller, long userId, boolean needUsageEvent); - boolean revokeFirewallRulesForIp(long ipId, long userId, Account caller) throws ResourceUnavailableException; + boolean revokeFirewallRulesForIp(IpAddress ip, long userId, Account caller) throws ResourceUnavailableException; // /** // * Revokes a firewall rule @@ -75,7 +77,7 @@ public interface FirewallManager extends FirewallService { FirewallRule createRuleForAllCidrs(long ipAddrId, Account caller, Integer startPort, Integer endPort, String protocol, Integer icmpCode, Integer icmpType, Long relatedRuleId, long networkId) throws NetworkRuleConflictException; - boolean revokeAllFirewallRulesForNetwork(long networkId, long userId, Account caller) throws ResourceUnavailableException; + boolean revokeAllFirewallRulesForNetwork(Network network, long userId, Account caller) throws ResourceUnavailableException; boolean revokeFirewallRulesForVm(long vmId); diff --git a/engine/components-api/src/main/java/com/cloud/network/rules/RulesManager.java b/engine/components-api/src/main/java/com/cloud/network/rules/RulesManager.java index c77874329fc..79ffdfdb973 100644 --- a/engine/components-api/src/main/java/com/cloud/network/rules/RulesManager.java +++ b/engine/components-api/src/main/java/com/cloud/network/rules/RulesManager.java @@ -22,6 +22,7 @@ import com.cloud.exception.InsufficientAddressCapacityException; import com.cloud.exception.NetworkRuleConflictException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.IpAddress; +import 
com.cloud.network.Network; import com.cloud.user.Account; import com.cloud.uservm.UserVm; import com.cloud.vm.Nic; @@ -47,7 +48,7 @@ public interface RulesManager extends RulesService { FirewallRule[] reservePorts(IpAddress ip, String protocol, FirewallRule.Purpose purpose, boolean openFirewall, Account caller, int... ports) throws NetworkRuleConflictException; - boolean applyStaticNatsForNetwork(long networkId, boolean continueOnError, Account caller); + boolean applyStaticNatsForNetwork(Network network, boolean continueOnError, Account caller); void getSystemIpAndEnableStaticNatForVm(VirtualMachine vm, boolean getNewIp) throws InsufficientAddressCapacityException; @@ -60,7 +61,7 @@ public interface RulesManager extends RulesService { * @param forRevoke * @return */ - boolean applyStaticNatForNetwork(long networkId, boolean continueOnError, Account caller, boolean forRevoke); + boolean applyStaticNatForNetwork(Network network, boolean continueOnError, Account caller, boolean forRevoke); List listAssociatedRulesForGuestNic(Nic nic); diff --git a/engine/components-api/src/main/java/com/cloud/network/security/SecurityGroupManager.java b/engine/components-api/src/main/java/com/cloud/network/security/SecurityGroupManager.java index ffca4bb013b..6e2270ffb10 100644 --- a/engine/components-api/src/main/java/com/cloud/network/security/SecurityGroupManager.java +++ b/engine/components-api/src/main/java/com/cloud/network/security/SecurityGroupManager.java @@ -19,6 +19,7 @@ package com.cloud.network.security; import java.util.HashMap; import java.util.List; +import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; /** @@ -36,9 +37,9 @@ public interface SecurityGroupManager { public SecurityGroupVO createDefaultSecurityGroup(Long accountId); - public boolean addInstanceToGroups(Long userVmId, List groups); + public boolean addInstanceToGroups(UserVm userVm, List groups); - public void removeInstanceFromGroups(long userVmId); + public void removeInstanceFromGroups(UserVm 
userVm); public void fullSync(long agentId, HashMap> newGroupStates); diff --git a/engine/components-api/src/main/java/com/cloud/network/vpc/VpcManager.java b/engine/components-api/src/main/java/com/cloud/network/vpc/VpcManager.java index e7f02e62045..626c4da1bb1 100644 --- a/engine/components-api/src/main/java/com/cloud/network/vpc/VpcManager.java +++ b/engine/components-api/src/main/java/com/cloud/network/vpc/VpcManager.java @@ -20,6 +20,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import com.cloud.network.dao.IPAddressVO; import com.cloud.utils.Pair; import org.apache.cloudstack.acl.ControlledEntity.ACLType; @@ -100,6 +101,8 @@ public interface VpcManager { */ void unassignIPFromVpcNetwork(long ipId, long networkId); + void unassignIPFromVpcNetwork(final IPAddressVO ip, final Network network); + /** * Creates guest network in the VPC * diff --git a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java index b2ae8b89837..343ad0fa212 100755 --- a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java +++ b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java @@ -185,7 +185,7 @@ public interface ResourceManager extends ResourceService, Configurable { * @param vgpuType the VGPU type * @return true when the host has the capacity with given VGPU type */ - boolean isGPUDeviceAvailable(long hostId, String groupName, String vgpuType); + boolean isGPUDeviceAvailable(Host host, String groupName, String vgpuType); /** * Get available GPU device diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index b5153668899..0b9f7bcb7db 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ 
-365,9 +365,9 @@ public interface StorageManager extends StorageService { String getStoragePoolMountFailureReason(String error); - boolean connectHostToSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException; + boolean connectHostToSharedPool(Host host, long poolId) throws StorageUnavailableException, StorageConflictException; - void disconnectHostFromSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException; + void disconnectHostFromSharedPool(Host host, StoragePool pool) throws StorageUnavailableException, StorageConflictException; void enableHost(long hostId) throws StorageUnavailableException, StorageConflictException; diff --git a/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java b/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java index 997ae3985f1..b8912526fdf 100644 --- a/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java +++ b/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java @@ -120,7 +120,7 @@ public interface TemplateManager { DataStore getImageStore(long tmpltId); - Long getTemplateSize(long templateId, long zoneId); + Long getTemplateSize(VirtualMachineTemplate template, long zoneId); DataStore getImageStore(String storeUuid, Long zoneId, VolumeVO volume); @@ -143,7 +143,7 @@ public interface TemplateManager { TemplateType validateTemplateType(BaseCmd cmd, boolean isAdmin, boolean isCrossZones); - List getTemplateDisksOnImageStore(Long templateId, DataStoreRole role, String configurationId); + List getTemplateDisksOnImageStore(VirtualMachineTemplate template, DataStoreRole role, String configurationId); static Boolean getValidateUrlIsResolvableBeforeRegisteringTemplateValue() { return ValidateUrlIsResolvableBeforeRegisteringTemplate.value(); diff --git a/engine/components-api/src/main/java/com/cloud/vm/VmWorkJobHandlerProxy.java 
b/engine/components-api/src/main/java/com/cloud/vm/VmWorkJobHandlerProxy.java index c82edc70ded..ef1c71e6b01 100644 --- a/engine/components-api/src/main/java/com/cloud/vm/VmWorkJobHandlerProxy.java +++ b/engine/components-api/src/main/java/com/cloud/vm/VmWorkJobHandlerProxy.java @@ -26,9 +26,7 @@ import org.apache.cloudstack.jobs.JobInfo; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import com.cloud.serializer.GsonHelper; import com.cloud.utils.Pair; -import com.google.gson.Gson; /** * VmWorkJobHandlerProxy can not be used as standalone due to run-time @@ -44,10 +42,8 @@ public class VmWorkJobHandlerProxy implements VmWorkJobHandler { private Object _target; private Map, Method> _handlerMethodMap = new HashMap, Method>(); - private Gson _gsonLogger; public VmWorkJobHandlerProxy(Object target) { - _gsonLogger = GsonHelper.getGsonLogger(); buildLookupMap(target.getClass()); _target = target; @@ -123,10 +119,10 @@ public class VmWorkJobHandlerProxy implements VmWorkJobHandler { throw e; } } else { - logger.error("Unable to find handler for VM work job: " + work.getClass().getName() + _gsonLogger.toJson(work)); + logger.error("Unable to find handler for VM work job: {} {}", work.getClass().getName(), work); RuntimeException ex = new RuntimeException("Unable to find handler for VM work job: " + work.getClass().getName()); - return new Pair(JobInfo.Status.FAILED, JobSerializerHelper.toObjectSerializedString(ex)); + return new Pair<>(JobInfo.Status.FAILED, JobSerializerHelper.toObjectSerializedString(ex)); } } } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java index c0f7586aee0..30a58d405c9 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java @@ -34,6 +34,7 @@ import java.util.concurrent.TimeUnit; 
import com.cloud.agent.api.CleanupPersistentNetworkResourceCommand; import org.apache.cloudstack.agent.lb.SetupMSListCommand; import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -111,6 +112,7 @@ public abstract class AgentAttache { protected static String LOG_SEQ_FORMATTED_STRING; protected final long _id; + protected String _uuid; protected String _name = null; protected final ConcurrentHashMap _waitForList; protected final LinkedList _requests; @@ -133,8 +135,9 @@ public abstract class AgentAttache { Arrays.sort(s_commandsNotAllowedInConnectingMode); } - protected AgentAttache(final AgentManagerImpl agentMgr, final long id, final String name, final boolean maintenance) { + protected AgentAttache(final AgentManagerImpl agentMgr, final long id, final String uuid, final String name, final boolean maintenance) { _id = id; + _uuid = uuid; _name = name; _waitForList = new ConcurrentHashMap(); _currentSequence = null; @@ -145,6 +148,13 @@ public abstract class AgentAttache { LOG_SEQ_FORMATTED_STRING = String.format("Seq %d-{}: {}", _id); } + @Override + public String toString() { + return String.format("AgentAttache %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "_id", "_uuid", "_name")); + } + public synchronized long getNextSequence() { return ++_nextSequence; } @@ -206,7 +216,7 @@ public abstract class AgentAttache { logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Cancelling."); final Listener listener = _waitForList.remove(seq); if (listener != null) { - listener.processDisconnect(_id, Status.Disconnected); + listener.processDisconnect(_id, _uuid, _name, Status.Disconnected); } int index = findRequest(seq); if (index >= 0) { @@ -243,6 +253,10 @@ public abstract class AgentAttache { return _id; } + public String getUuid() { + return _uuid; + } + 
public String getName() { return _name; } @@ -316,7 +330,7 @@ public abstract class AgentAttache { it.remove(); final Listener monitor = entry.getValue(); logger.debug(LOG_SEQ_FORMATTED_STRING, entry.getKey(), "Sending disconnect to " + monitor.getClass()); - monitor.processDisconnect(_id, state); + monitor.processDisconnect(_id, _uuid, _name, state); } } } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java index 27b3ac2d751..63e97519534 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java @@ -53,6 +53,7 @@ import org.apache.cloudstack.framework.jobs.AsyncJobExecutionContext; import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.outofbandmanagement.dao.OutOfBandManagementDao; import org.apache.cloudstack.utils.identity.ManagementServerNode; +import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.BooleanUtils; import com.cloud.agent.AgentManager; @@ -139,6 +140,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl protected List> _cmdMonitors = new ArrayList>(17); protected List> _creationMonitors = new ArrayList>(17); protected List _loadingAgents = new ArrayList(); + protected Map _commandTimeouts = new HashMap<>(); private int _monitorId = 0; private final Lock _agentStatusLock = new ReentrantLock(); @@ -241,6 +243,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl _monitorExecutor = new ScheduledThreadPoolExecutor(1, new NamedThreadFactory("AgentMonitor")); + initializeCommandTimeouts(); + return true; } @@ -302,7 +306,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } - logger.warn("No handling of agent control command: {} sent from {}", 
cmd, attache.getId()); + logger.warn("No handling of agent control command: {} sent from {}", cmd, attache); return new AgentControlAnswer(cmd); } @@ -344,7 +348,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl answer = easySend(targetHostId, cmd); } catch (final Exception e) { String errorMsg = String.format("Error sending command %s to host %s, due to %s", cmd.getClass().getName(), - host.getUuid(), e.getLocalizedMessage()); + host, e.getLocalizedMessage()); logger.error(errorMsg); logger.debug(errorMsg, e); } @@ -424,6 +428,62 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } + protected int getTimeout(final Commands commands, int timeout) { + int result; + if (timeout > 0) { + result = timeout; + } else { + result = Wait.value(); + } + + int granularTimeout = getTimeoutFromGranularWaitTime(commands); + return (granularTimeout > 0) ? granularTimeout : result; + } + + protected int getTimeoutFromGranularWaitTime(final Commands commands) { + int maxWait = 0; + if (MapUtils.isNotEmpty(_commandTimeouts)) { + for (final Command cmd : commands) { + String simpleCommandName = cmd.getClass().getSimpleName(); + Integer commandTimeout = _commandTimeouts.get(simpleCommandName); + if (commandTimeout != null && commandTimeout > maxWait) { + maxWait = commandTimeout; + } + } + } + + return maxWait; + } + + private void initializeCommandTimeouts() { + String commandWaits = GranularWaitTimeForCommands.value().trim(); + if (StringUtils.isNotEmpty(commandWaits)) { + _commandTimeouts = getCommandTimeoutsMap(commandWaits); + logger.info(String.format("Timeouts for management server internal commands successfully initialized from global setting commands.timeout: %s", _commandTimeouts)); + } + } + + private Map getCommandTimeoutsMap(String commandWaits) { + String[] commandPairs = commandWaits.split(","); + Map commandTimeouts = new HashMap<>(); + + for (String commandPair : commandPairs) { + String[] parts = 
commandPair.trim().split("="); + if (parts.length == 2) { + try { + String commandName = parts[0].trim(); + int commandTimeout = Integer.parseInt(parts[1].trim()); + commandTimeouts.put(commandName, commandTimeout); + } catch (NumberFormatException e) { + logger.error(String.format("Initialising the timeouts using commands.timeout: %s for management server internal commands failed with error %s", commandPair, e.getMessage())); + } + } else { + logger.error(String.format("Error initialising the timeouts for management server internal commands. Invalid format in commands.timeout: %s", commandPair)); + } + } + return commandTimeouts; + } + @Override public Answer[] send(final Long hostId, final Commands commands, int timeout) throws AgentUnavailableException, OperationTimedoutException { assert hostId != null : "Who's not checking the agent id before sending? ... (finger wagging)"; @@ -431,8 +491,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl throw new AgentUnavailableException(-1); } - if (timeout <= 0) { - timeout = Wait.value(); + int wait = getTimeout(commands, timeout); + logger.debug(String.format("Wait time setting on %s is %d seconds", commands, wait)); + for (Command cmd : commands) { + String simpleCommandName = cmd.getClass().getSimpleName(); + Integer commandTimeout = _commandTimeouts.get(simpleCommandName); + if (commandTimeout != null) { + cmd.setWait(wait); + } } if (CheckTxnBeforeSending.value()) { @@ -454,7 +520,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl final Request req = new Request(hostId, agent.getName(), _nodeId, cmds, commands.stopOnError(), true); req.setSequence(agent.getNextSequence()); - final Answer[] answers = agent.send(req, timeout); + final Answer[] answers = agent.send(req, wait); notifyAnswersToMonitors(hostId, req.getSequence(), answers); commands.setAnswers(answers); return answers; @@ -464,11 +530,11 @@ public class AgentManagerImpl extends 
ManagerBase implements AgentManager, Handl final Long hostId = agent.getId(); final HostVO host = _hostDao.findById(hostId); if (host != null && host.getType() != null && !host.getType().isVirtual()) { - logger.debug("Checking if agent ({}) is alive", hostId); + logger.debug("Checking if agent ({}) is alive", host); final Answer answer = easySend(hostId, new CheckHealthCommand()); if (answer != null && answer.getResult()) { final Status status = Status.Up; - logger.debug("Agent ({}) responded to checkHealthCommand, reporting that agent is {}", hostId, status); + logger.debug("Agent ({}) responded to checkHealthCommand, reporting that agent is {}", host, status); return status; } return _haMgr.investigate(hostId); @@ -493,7 +559,9 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl public long send(final Long hostId, final Commands commands, final Listener listener) throws AgentUnavailableException { final AgentAttache agent = getAttache(hostId); if (agent.isClosed()) { - throw new AgentUnavailableException("Agent " + agent.getId() + " is closed", agent.getId()); + throw new AgentUnavailableException(String.format( + "Agent [id: %d, name: %s] is closed", + agent.getId(), agent.getName()), agent.getId()); } final Command[] cmds = checkForCommandsAndTag(commands); @@ -510,7 +578,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl return; } final long hostId = attache.getId(); - logger.debug("Remove Agent : {}", hostId); + logger.debug("Remove Agent : {}", attache); AgentAttache removed = null; boolean conflict = false; synchronized (_agents) { @@ -522,7 +590,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } if (conflict) { - logger.debug("Agent for host {} is created when it is being disconnected", hostId); + logger.debug("Agent for host {} is created when it is being disconnected", attache); } if (removed != null) { removed.disconnect(nextState); @@ -530,7 +598,7 
@@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl for (final Pair monitor : _hostMonitors) { logger.debug("Sending Disconnect to listener: {}", monitor.second().getClass().getName()); - monitor.second().processDisconnect(hostId, nextState); + monitor.second().processDisconnect(hostId, attache.getUuid(), attache.getName(), nextState); } } @@ -555,28 +623,31 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl if (e instanceof ConnectionException) { final ConnectionException ce = (ConnectionException)e; if (ce.isSetupError()) { - logger.warn("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage()); + logger.warn("Monitor {} says there is an error in the connect process for {} due to {}", + monitor.second().getClass().getSimpleName(), host, e.getMessage()); handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); throw ce; } else { - logger.info("Monitor " + monitor.second().getClass().getSimpleName() + " says not to continue the connect process for " + hostId + " due to " + e.getMessage()); + logger.info("Monitor {} says not to continue the connect process for {} due to {}", + monitor.second().getClass().getSimpleName(), host, e.getMessage()); handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true); return attache; } } else if (e instanceof HypervisorVersionChangedException) { handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true); - throw new CloudRuntimeException("Unable to connect " + attache.getId(), e); + throw new CloudRuntimeException(String.format("Unable to connect %s", attache), e); } else { - logger.error("Monitor {} says there is an error in the connect process for {} due to {}", monitor.second().getClass().getSimpleName(), hostId, e.getMessage(), e); + logger.error("Monitor {} says there is an error in the connect 
process for {} due to {}", + monitor.second().getClass().getSimpleName(), host, e.getMessage(), e); handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); - throw new CloudRuntimeException("Unable to connect " + attache.getId(), e); + throw new CloudRuntimeException(String.format("Unable to connect %s", attache), e); } } } } final Long dcId = host.getDataCenterId(); - final ReadyCommand ready = new ReadyCommand(dcId, host.getId(), NumbersUtil.enableHumanReadableSizes); + final ReadyCommand ready = new ReadyCommand(host, NumbersUtil.enableHumanReadableSizes); ready.setWait(ReadyCommandWait.value()); final Answer answer = easySend(hostId, ready); if (answer == null || !answer.getResult()) { @@ -590,7 +661,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl Map detailsMap = readyAnswer.getDetailsMap(); if (detailsMap != null) { String uefiEnabled = detailsMap.get(Host.HOST_UEFI_ENABLE); - logger.debug("Got HOST_UEFI_ENABLE [{}] for hostId [{}]:", uefiEnabled, host.getUuid()); + logger.debug("Got HOST_UEFI_ENABLE [{}] for host [{}]:", uefiEnabled, host); if (uefiEnabled != null) { _hostDao.loadDetails(host); if (!uefiEnabled.equals(host.getDetails().get(Host.HOST_UEFI_ENABLE))) { @@ -707,14 +778,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl // load the respective discoverer final Discoverer discoverer = _resourceMgr.getMatchingDiscover(host.getHypervisorType()); if (discoverer == null) { - logger.info("Could not to find a Discoverer to load the resource: {} for hypervisor type: {}", host.getId(), host.getHypervisorType()); + logger.info("Could not to find a Discoverer to load the resource: {} for hypervisor type: {}", host, host.getHypervisorType()); resource = loadResourcesWithoutHypervisor(host); } else { resource = discoverer.reloadResource(host); } if (resource == null) { - logger.warn("Unable to load the resource: {}", host.getId()); + logger.warn("Unable to 
load the resource: {}", host); return false; } @@ -734,14 +805,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl return h == null ? false : true; } else { - _executor.execute(new SimulateStartTask(host.getId(), resource, host.getDetails())); + _executor.execute(new SimulateStartTask(host.getId(), host.getUuid(), host.getName(), resource, host.getDetails())); return true; } } protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) throws ConnectionException { - logger.debug("create DirectAgentAttache for {}", host.getId()); - final DirectAgentAttache attache = new DirectAgentAttache(this, host.getId(), host.getName(), resource, host.isInMaintenanceStates()); + logger.debug("create DirectAgentAttache for {}", host); + final DirectAgentAttache attache = new DirectAgentAttache(this, host.getId(), host.getUuid(), host.getName(), resource, host.isInMaintenanceStates()); AgentAttache old = null; synchronized (_agents) { @@ -766,7 +837,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl for (final AgentAttache agent : _agents.values()) { final HostVO host = _hostDao.findById(agent.getId()); if (host == null) { - logger.debug("Cant not find host {}", agent.getId()); + logger.debug("Cannot find host {}", agent); } else { if (!agent.forForward()) { agentStatusTransitTo(host, Event.ManagementServerDown, _nodeId); @@ -784,17 +855,17 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl final Status currentStatus = host.getStatus(); Status nextStatus; if (currentStatus == Status.Down || currentStatus == Status.Alert || currentStatus == Status.Removed) { - logger.debug("Host {} is already {}", host.getUuid(), currentStatus); + logger.debug("Host {} is already {}", host, currentStatus); nextStatus = currentStatus; } else { try { nextStatus = currentStatus.getNextStatus(event); } catch (final NoTransitionException e) { - final String err 
= String.format("Cannot find next status for %s as current status is %s for agent %s", event, currentStatus, host); logger.debug(err); throw new CloudRuntimeException(err); } - logger.debug("The next status of agent {} is {}, current status is {}", host.getUuid(), nextStatus, currentStatus); + logger.debug("The next status of agent {} is {}, current status is {}", host, nextStatus, currentStatus); } return nextStatus; } @@ -806,17 +877,18 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl GlobalLock joinLock = getHostJoinLock(hostId); if (joinLock.lock(60)) { try { - logger.info("Host {} is disconnecting with event {}", hostId, event); + logger.info("Host {} is disconnecting with event {}", + attache, event); Status nextStatus = null; final HostVO host = _hostDao.findById(hostId); if (host == null) { - logger.warn("Can't find host with {}", hostId); + logger.warn("Can't find host with {} ({})", hostId, attache); nextStatus = Status.Removed; } else { nextStatus = getNextStatusOnDisconnection(host, event); caService.purgeHostCertificate(host); } - logger.debug("Deregistering link for {} with state {}", hostId, nextStatus); + logger.debug("Deregistering link for {} with state {}", attache, nextStatus); removeAgent(attache, nextStatus); @@ -851,28 +923,30 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl if (nextStatus == Status.Alert) { /* OK, we are going to the bad status, let's see what happened */ - logger.info("Investigating why host {} has disconnected with event", hostId, event); + logger.info("Investigating why host {} has disconnected with event {}", host, event); Status determinedState = investigate(attache); // if state cannot be determined do nothing and bail out if (determinedState == null) { if ((System.currentTimeMillis() >> 10) - 
host.getLastPinged() > AlertWait.value()) { - logger.warn("Agent {} state cannot be determined for more than {}({}) seconds, will go to Alert state", hostId, AlertWait, AlertWait.value()); + logger.warn("Agent {} state cannot be determined for more than {} ({}) seconds, will go to Alert state", + host, AlertWait, AlertWait.value()); determinedState = Status.Alert; } else { - logger.warn("Agent {} state cannot be determined, do nothing", hostId); + logger.warn("Agent {} state cannot be determined, do nothing", host); return false; } } final Status currentStatus = host.getStatus(); - logger.info("The agent from host {} state determined is {}", hostId, determinedState); + logger.info("The agent from host {} state determined is {}", host, determinedState); if (determinedState == Status.Down) { - final String message = "Host is down: " + host.getId() + "-" + host.getName() + ". Starting HA on the VMs"; + final String message = String.format("Host %s is down. Starting HA on the VMs", host); logger.error(message); if (host.getType() != Host.Type.SecondaryStorage && host.getType() != Host.Type.ConsoleProxy) { - _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "Host down, " + host.getId(), message); + _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), + host.getPodId(), String.format("Host down, %s", host), message); } event = Status.Event.HostDown; } else if (determinedState == Status.Up) { @@ -881,21 +955,20 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl agentStatusTransitTo(host, Status.Event.Ping, _nodeId); return false; } else if (determinedState == Status.Disconnected) { - logger.warn("Agent is disconnected but the host is still up: {}-{}", host.getId(), host.getName() + - '-' + host.getResourceState()); + logger.warn("Agent is disconnected but the host is still up: {} state: {}", host, host.getResourceState()); if (currentStatus == 
Status.Disconnected || (currentStatus == Status.Up && host.getResourceState() == ResourceState.PrepareForMaintenance)) { if ((System.currentTimeMillis() >> 10) - host.getLastPinged() > AlertWait.value()) { - logger.warn("Host {} has been disconnected past the wait time it should be disconnected.", host.getId()); + logger.warn("Host {} has been disconnected past the wait time it should be disconnected.", host); event = Status.Event.WaitedTooLong; } else { - logger.debug("Host {} has been determined to be disconnected but it hasn't passed the wait time yet.", host.getId()); + logger.debug("Host {} has been determined to be disconnected but it hasn't passed the wait time yet.", host); return false; } } else if (currentStatus == Status.Up) { final DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId()); final HostPodVO podVO = _podDao.findById(host.getPodId()); - final String hostDesc = "name: " + host.getName() + " (id:" + host.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName(); + final String hostDesc = "name: " + host.getName() + " (id:" + host.getUuid() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName(); if (host.getType() != Host.Type.SecondaryStorage && host.getType() != Host.Type.ConsoleProxy) { _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "Host disconnected, " + hostDesc, "If the agent for host [" + hostDesc + "] is not restarted within " + AlertWait + " seconds, host will go to Alert state"); @@ -907,12 +980,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl final DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId()); final HostPodVO podVO = _podDao.findById(host.getPodId()); final String podName = podVO != null ? 
podVO.getName() : "NO POD"; - final String hostDesc = "name: " + host.getName() + " (id:" + host.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podName; - _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "Host in ALERT state, " + hostDesc, - "In availability zone " + host.getDataCenterId() + ", host is in alert state: " + host.getId() + "-" + host.getName()); + final String hostDesc = String.format("%s, availability zone: %s, pod: %s", host, dcVO, podName); + _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, + host.getDataCenterId(), host.getPodId(), + String.format("Host in ALERT state, %s", hostDesc), + String.format("In availability zone %s, host is in alert state: %s", dcVO, host)); } } else { - logger.debug("The next status of agent {} is not Alert, no need to investigate what happened", host.getId()); + logger.debug("The next status of agent {} is not Alert, no need to investigate what happened", host); } } handleDisconnectWithoutInvestigation(attache, event, true, true); @@ -958,7 +1033,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } final Status status = h.getStatus(); if (!status.equals(Status.Up) && !status.equals(Status.Connecting)) { - logger.debug("Can not send command {} due to Host {} not being up", cmd, hostId); + logger.debug("Can not send command {} due to Host {} not being up", cmd, h); return null; } final Answer answer = send(hostId, cmd); @@ -988,6 +1063,11 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override public Answer[] send(final Long hostId, final Commands cmds) throws AgentUnavailableException, OperationTimedoutException { int wait = 0; + if (cmds.size() > 1) { + logger.debug(String.format("Checking the wait time in seconds to be used for the following commands : %s. 
If there are multiple commands sent at once," + " then max wait time of those will be used", cmds)); } + for (final Command cmd : cmds) { if (cmd.getWait() > wait) { wait = cmd.getWait(); @@ -1004,21 +1084,26 @@ } if (host.getRemoved() != null) { - throw new CloudRuntimeException("Host has already been removed: " + hostId); + throw new CloudRuntimeException(String.format( + "Host has already been removed: %s", host)); } if (host.getStatus() == Status.Disconnected) { - logger.debug("Host is already disconnected, no work to be done: {}", hostId); + logger.debug("Host is already disconnected, no work to be done: {}", host); return; } if (host.getStatus() != Status.Up && host.getStatus() != Status.Alert && host.getStatus() != Status.Rebalancing) { - throw new CloudRuntimeException("Unable to disconnect host because it is not in the correct state: host=" + hostId + "; Status=" + host.getStatus()); + throw new CloudRuntimeException(String.format( + "Unable to disconnect host because it is not in the correct state: host=%s; Status=%s", + host, host.getStatus())); } AgentAttache attache = findAttache(hostId); if (attache == null) { - throw new CloudRuntimeException("Unable to disconnect host because it is not connected to this server: " + hostId); + throw new CloudRuntimeException(String.format( + "Unable to disconnect host because it is not connected to this server: %s", + host)); } disconnectWithoutInvestigation(attache, Event.ShutdownRequested); } @@ -1043,9 +1128,9 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException { if (event == Event.AgentDisconnected) { - logger.debug("Received agent disconnect event for host {}", hostId); AgentAttache attache = null; attache = findAttache(hostId); + logger.debug("Received agent disconnect event for host {} ({})", 
hostId, attache); if (attache != null) { handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); } @@ -1055,7 +1140,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl try { reconnect(hostId); } catch (CloudRuntimeException e) { - logger.debug("Error on shutdown request for hostID: {}", hostId, e); + logger.debug("Error on shutdown request for hostID: {} ({})", hostId, findAttache(hostId), e); return false; } return true; @@ -1070,8 +1155,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) throws ConnectionException { - logger.debug("create ConnectedAgentAttache for {}", host.getId()); - final AgentAttache attache = new ConnectedAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates()); + logger.debug("create ConnectedAgentAttache for {}", host); + final AgentAttache attache = new ConnectedAgentAttache(this, host.getId(), host.getUuid(), host.getName(), link, host.isInMaintenanceStates()); link.attach(attache); AgentAttache old = null; @@ -1118,7 +1203,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl joinLock.unlock(); } } else { - throw new ConnectionException(true, "Unable to acquire lock on host " + host.getUuid()); + throw new ConnectionException(true, + String.format("Unable to acquire lock on host %s", host)); } joinLock.releaseRef(); return attache; @@ -1131,7 +1217,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl final HostVO host = _resourceMgr.createHostVOForConnectedAgent(startup); if (host != null) { checkHostArchOnCluster(host); - ready = new ReadyCommand(host.getDataCenterId(), host.getId(), NumbersUtil.enableHumanReadableSizes); + ready = new ReadyCommand(host, NumbersUtil.enableHumanReadableSizes); attache = sendReadyAndGetAttache(host, ready, link, startup); } } catch 
(final Exception e) { @@ -1171,8 +1257,12 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl ServerResource resource; Map details; long id; + String uuid; + String name; - public SimulateStartTask(final long id, final ServerResource resource, final Map details) { + public SimulateStartTask(final long id, String uuid, String name, final ServerResource resource, final Map details) { this.id = id; + this.uuid = uuid; + this.name = name; this.resource = resource; this.details = details; @@ -1181,26 +1269,26 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override protected void runInContext() { try { - logger.debug("Simulating start for resource {} id {}", resource.getName(), id); + logger.debug("Simulating start for resource {} (id: {}, uuid: {}, name {})", resource.getName(), id, uuid, name); if (tapLoadingAgents(id, TapAgentsAction.Add)) { try { final AgentAttache agentattache = findAttache(id); if (agentattache == null) { - logger.debug("Creating agent for host {}", id); + logger.debug("Creating agent for host [id: {}, uuid: {}, name: {}]", id, uuid, name); _resourceMgr.createHostAndAgent(id, resource, details, false, null, false); - logger.debug("Completed creating agent for host {}", id); + logger.debug("Completed creating agent for host [id: {}, uuid: {}, name: {}]", id, uuid, name); } else { - logger.debug("Agent already created in another thread for host {}, ignore this", id); + logger.debug("Agent already created in another thread for host [id: {}, uuid: {}, name: {}], ignore this", id, uuid, name); } } finally { tapLoadingAgents(id, TapAgentsAction.Del); } } else { - logger.debug("Agent creation already getting processed in another thread for host {}, ignore this", id); + logger.debug("Agent creation already getting processed in another thread for host [id: {}, uuid: {}, name: {}], ignore this", id, uuid, name); } } catch (final Exception e) { - logger.warn("Unable to simulate start on resource {} name {}", id, 
resource.getName(), e); + logger.warn("Unable to simulate start on resource [id: {}, uuid: {}, name: {}] name {}", id, uuid, name, resource.getName(), e); } } } @@ -1240,7 +1328,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl cmd = cmds[i]; if (cmd instanceof StartupRoutingCommand || cmd instanceof StartupProxyCommand || cmd instanceof StartupSecondaryStorageCommand || cmd instanceof StartupStorageCommand) { - answers[i] = new StartupAnswer((StartupCommand) cmds[i], 0, mgmtServiceConf.getPingInterval()); + answers[i] = new StartupAnswer((StartupCommand) cmds[i], 0, "", "", mgmtServiceConf.getPingInterval()); break; } } @@ -1270,7 +1358,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } if (!BooleanUtils.toBoolean(EnableKVMAutoEnableDisable.valueIn(host.getClusterId()))) { logger.debug("{} is disabled for the cluster {}, cannot process the health check result " + - "received for the host {}", EnableKVMAutoEnableDisable.key(), host.getClusterId(), host.getName()); + "received for the host {}", EnableKVMAutoEnableDisable.key(), host.getClusterId(), host); return; } @@ -1280,10 +1368,10 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl logger.info("Host health check {}, auto {} KVM host: {}", hostHealthCheckResult ? "succeeds" : "fails", hostHealthCheckResult ? 
"enabling" : "disabling", - host.getName()); + host); _resourceMgr.autoUpdateHostAllocationState(hostId, resourceEvent); } catch (NoTransitionException e) { - logger.error("Cannot Auto {} host: {}", resourceEvent, host.getName(), e); + logger.error("Cannot Auto {} host: {}", resourceEvent, host, e); } } @@ -1330,11 +1418,11 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl if (logger.isDebugEnabled()) { if (cmd instanceof PingRoutingCommand) { logD = false; - logger.debug("Ping from Routing host {}({})", hostId, hostName); + logger.debug("Ping from Routing host {}", attache); logger.trace("SeqA {}-{}: Processing {}", hostId, request.getSequence(), request); } else if (cmd instanceof PingCommand) { logD = false; - logger.debug("Ping from {}({})", hostId, hostName); + logger.debug("Ping from {}", attache); logger.trace("SeqA {}-{}: Processing {}", hostId, request.getSequence(), request); } else { logger.debug("SeqA {}-{}: {}", hostId, request.getSequence(), request); @@ -1349,20 +1437,20 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl if (cmd instanceof StartupRoutingCommand) { final StartupRoutingCommand startup = (StartupRoutingCommand) cmd; processStartupRoutingCommand(startup, hostId); - answer = new StartupAnswer(startup, attache.getId(), mgmtServiceConf.getPingInterval()); + answer = new StartupAnswer(startup, attache.getId(), attache.getUuid(), attache.getName(), mgmtServiceConf.getPingInterval()); } else if (cmd instanceof StartupProxyCommand) { final StartupProxyCommand startup = (StartupProxyCommand) cmd; - answer = new StartupAnswer(startup, attache.getId(), mgmtServiceConf.getPingInterval()); + answer = new StartupAnswer(startup, attache.getId(), attache.getUuid(), attache.getName(), mgmtServiceConf.getPingInterval()); } else if (cmd instanceof StartupSecondaryStorageCommand) { final StartupSecondaryStorageCommand startup = (StartupSecondaryStorageCommand) cmd; - answer = new 
StartupAnswer(startup, attache.getId(), mgmtServiceConf.getPingInterval()); + answer = new StartupAnswer(startup, attache.getId(), attache.getUuid(), attache.getName(), mgmtServiceConf.getPingInterval()); } else if (cmd instanceof StartupStorageCommand) { final StartupStorageCommand startup = (StartupStorageCommand) cmd; - answer = new StartupAnswer(startup, attache.getId(), mgmtServiceConf.getPingInterval()); + answer = new StartupAnswer(startup, attache.getId(), attache.getUuid(), attache.getName(), mgmtServiceConf.getPingInterval()); } else if (cmd instanceof ShutdownCommand) { final ShutdownCommand shutdown = (ShutdownCommand)cmd; final String reason = shutdown.getReason(); - logger.info("Host {} has informed us that it is shutting down with reason {} and detail {}", attache.getId(), reason, shutdown.getDetail()); + logger.info("Host {} has informed us that it is shutting down with reason {} and detail {}", attache, reason, shutdown.getDetail()); if (reason.equals(ShutdownCommand.Update)) { // disconnectWithoutInvestigation(attache, Event.UpdateNeeded); throw new CloudRuntimeException("Agent update not implemented"); @@ -1392,7 +1480,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl // gateway (cannot ping the default route) final DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId()); final HostPodVO podVO = _podDao.findById(host.getPodId()); - final String hostDesc = "name: " + host.getName() + " (id:" + host.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName(); + final String hostDesc = String.format("%s, availability zone: %s, pod: %s", host, dcVO, podVO); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_ROUTING, host.getDataCenterId(), host.getPodId(), "Host lost connection to gateway, " + hostDesc, "Host [" + hostDesc + "] lost connection to gateway (default route) and is possibly having network connection issues."); @@ -1410,7 +1498,7 @@ public class AgentManagerImpl extends 
ManagerBase implements AgentManager, Handl } else if (cmd instanceof ReadyAnswer) { final HostVO host = _hostDao.findById(attache.getId()); if (host == null) { - logger.debug("Cant not find host {}", attache.getId()); + logger.debug("Cannot find host with id: {} ({})", attache.getId(), attache); } answer = new Answer(cmd); } else { @@ -1442,7 +1530,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl if (attache == null) { logger.warn("Unable to process: {}", response); } else if (!attache.processAnswers(response.getSequence(), response)) { - logger.info("Host {} - Seq {}: Response is not processed: {}", attache.getId(), response.getSequence(), response); + logger.info("Host {} - Seq {}: Response is not processed: {}", attache, response.getSequence(), response); } } @@ -1512,14 +1600,16 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl public boolean agentStatusTransitTo(final HostVO host, final Status.Event e, final long msId) { try { _agentStatusLock.lock(); - logger.debug("[Resource state = {}, Agent event = , Host id = {}, name = {}]", host.getResourceState(), e.toString(), host.getId(), host.getName()); + logger.debug("[Resource state = {}, Agent event = {}, Host = {}]", + host.getResourceState(), e.toString(), host); host.setManagementServerId(msId); try { return _statusStateMachine.transitTo(host, e, host.getId(), _hostDao); } catch (final NoTransitionException e1) { - logger.debug("Cannot transit agent status with event {} for host {}, name={}, management server id is {}", e, host.getId(), host.getName(), msId); - throw new CloudRuntimeException("Cannot transit agent status with event " + e + " for host " + host.getId() + ", management server id is " + msId + "," + e1.getMessage()); + logger.debug("Cannot transit agent status with event {} for host {}, management server id is {}", e, host, msId); + throw new CloudRuntimeException(String.format( + "Cannot transit agent status with event %s
for host %s, management server id is %d, %s", e, host, msId, e1.getMessage())); } } finally { _agentStatusLock.unlock(); @@ -1600,7 +1690,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl attache = createAttacheForDirectConnect(host, resource); final StartupAnswer[] answers = new StartupAnswer[cmds.length]; for (int i = 0; i < answers.length; i++) { - answers[i] = new StartupAnswer(cmds[i], attache.getId(), mgmtServiceConf.getPingInterval()); + answers[i] = new StartupAnswer(cmds[i], attache.getId(), attache.getUuid(), attache.getName(), mgmtServiceConf.getPingInterval()); } attache.process(answers); @@ -1650,7 +1740,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl public void pingBy(final long agentId) { // Update PingMap with the latest time if agent entry exists in the PingMap if (_pingMap.replace(agentId, InaccurateClock.getTimeInSeconds()) == null) { - logger.info("PingMap for agent: " + agentId + " will not be updated because agent is no longer in the PingMap"); + logger.info("PingMap for agent: {} ({}) will not be updated because agent is no longer in the PingMap", agentId, findAttache(agentId)); } } @@ -1671,17 +1761,17 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl /* * Host is in non-operation state, so no investigation and direct put agent to Disconnected */ - logger.debug("Ping timeout but agent {} is in resource state of {}, so no investigation", agentId, resourceState); + logger.debug("Ping timeout but agent {} is in resource state of {}, so no investigation", h, resourceState); disconnectWithoutInvestigation(agentId, Event.ShutdownRequested); } else { final HostVO host = _hostDao.findById(agentId); if (host != null && (host.getType() == Host.Type.ConsoleProxy || host.getType() == Host.Type.SecondaryStorageVM || host.getType() == Host.Type.SecondaryStorageCmdExecutor)) { - logger.warn("Disconnect agent for CPVM/SSVM due to physical connection 
close. host: {}", host.getId()); + logger.warn("Disconnect agent for CPVM/SSVM due to physical connection close. host: {}", host); disconnectWithoutInvestigation(agentId, Event.ShutdownRequested); } else { - logger.debug("Ping timeout for agent {}, do investigation", agentId); + logger.debug("Ping timeout for agent {}, do investigation", h); disconnectWithInvestigation(agentId, Event.PingTimeout); } } @@ -1802,7 +1892,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override public ConfigKey[] getConfigKeys() { return new ConfigKey[] { CheckTxnBeforeSending, Workers, Port, Wait, AlertWait, DirectAgentLoadSize, - DirectAgentPoolSize, DirectAgentThreadCap, EnableKVMAutoEnableDisable, ReadyCommandWait }; + DirectAgentPoolSize, DirectAgentThreadCap, EnableKVMAutoEnableDisable, ReadyCommandWait, GranularWaitTimeForCommands }; } protected class SetHostParamsListener implements Listener { @@ -1844,7 +1934,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl Commands c = new Commands(cmds); send(host.getId(), c, this); } catch (AgentUnavailableException e) { - logger.debug("Failed to send host params on host: " + host.getId()); + logger.debug("Failed to send host params on host: {}", host); } } } @@ -1903,7 +1993,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl for (Long hostId : hostIds) { Answer answer = easySend(hostId, cmds); if (answer == null || !answer.getResult()) { - logger.error("Error sending parameters to agent {}", hostId); + logger.error("Error sending parameters to agent {} ({})", hostId, findAttache(hostId)); } } } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java index 285ba4ffe60..e36b145c8bc 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java +++ 
b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java @@ -44,14 +44,14 @@ public class ClusteredAgentAttache extends ConnectedAgentAttache implements Rout s_clusteredAgentMgr = agentMgr; } - public ClusteredAgentAttache(final AgentManagerImpl agentMgr, final long id, final String name) { - super(agentMgr, id, name, null, false); + public ClusteredAgentAttache(final AgentManagerImpl agentMgr, final long id, final String uuid, final String name) { + super(agentMgr, id, uuid, name, null, false); _forward = true; _transferRequests = new LinkedList(); } - public ClusteredAgentAttache(final AgentManagerImpl agentMgr, final long id, final String name, final Link link, final boolean maintenance) { - super(agentMgr, id, name, link, maintenance); + public ClusteredAgentAttache(final AgentManagerImpl agentMgr, final long id, final String uuid, final String name, final Link link, final boolean maintenance) { + super(agentMgr, id, uuid, name, link, maintenance); _forward = link == null; _transferRequests = new LinkedList(); } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java index a7fea0f2533..be327418205 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java @@ -216,10 +216,11 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } } - logger.debug("Loading directly connected host {}({})", host.getId(), host.getName()); + logger.debug("Loading directly connected host {}", host); loadDirectlyConnectedHost(host, false); } catch (final Throwable e) { - logger.warn(" can not load directly connected host {}({}) due to ", host.getId(), host.getName(), e); + logger.warn(" can not load directly connected host {} due to ", + host, e);
} } } @@ -243,10 +244,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust return new ClusteredAgentHandler(type, link, data); } - protected AgentAttache createAttache(final long id) { - logger.debug("create forwarding ClusteredAgentAttache for {}", id); - final HostVO host = _hostDao.findById(id); - final AgentAttache attache = new ClusteredAgentAttache(this, id, host.getName()); + protected AgentAttache createAttache(final HostVO host) { + logger.debug("create forwarding ClusteredAgentAttache for {}", host); + long id = host.getId(); + final AgentAttache attache = new ClusteredAgentAttache(this, id, host.getUuid(), host.getName()); AgentAttache old = null; synchronized (_agents) { old = _agents.get(id); @@ -261,8 +262,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) { - logger.debug("create ClusteredAgentAttache for {}", host.getId()); - final AgentAttache attache = new ClusteredAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates()); + logger.debug("create ClusteredAgentAttache for {}", host); + final AgentAttache attache = new ClusteredAgentAttache(this, host.getId(), host.getUuid(), host.getName(), link, host.isInMaintenanceStates()); link.attach(attache); AgentAttache old = null; synchronized (_agents) { @@ -278,7 +279,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) { logger.debug("Create ClusteredDirectAgentAttache for {}.", host); - final DirectAgentAttache attache = new ClusteredDirectAgentAttache(this, host.getId(), host.getName(), _nodeId, resource, host.isInMaintenanceStates()); + final DirectAgentAttache attache = new ClusteredDirectAgentAttache(this, host.getId(), host.getUuid(), host.getName(), _nodeId, resource, 
host.isInMaintenanceStates()); AgentAttache old = null; synchronized (_agents) { old = _agents.get(host.getId()); @@ -321,15 +322,17 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException { if (event == Event.AgentDisconnected) { - logger.debug("Received agent disconnect event for host {}", hostId); final AgentAttache attache = findAttache(hostId); + logger.debug("Received agent disconnect event for host {} ({})", hostId, attache); if (attache != null) { // don't process disconnect if the host is being rebalanced if (isAgentRebalanceEnabled()) { final HostTransferMapVO transferVO = _hostTransferDao.findById(hostId); if (transferVO != null) { if (transferVO.getFutureOwner() == _nodeId && transferVO.getState() == HostTransferState.TransferStarted) { - logger.debug("Not processing {} event for the host id={} as the host is being connected to {}",Event.AgentDisconnected, hostId, _nodeId); + logger.debug( + "Not processing {} event for the host [id: {}, uuid: {}, name: {}] as the host is being connected to {}", + Event.AgentDisconnected, hostId, attache.getUuid(), attache.getName(), _nodeId); return true; } } @@ -338,7 +341,9 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust // don't process disconnect if the disconnect came for the host via delayed cluster notification, // but the host has already reconnected to the current management server if (!attache.forForward()) { - logger.debug("Not processing {} event for the host id={} as the host is directly connected to the current management server {}", Event.AgentDisconnected, hostId, _nodeId); + logger.debug( + "Not processing {} event for the host [id: {}, uuid: {}, name: {}] as the host is directly connected to the current management server {}", + Event.AgentDisconnected, hostId, attache.getUuid(), attache.getName(), _nodeId); return true; } @@ 
-545,8 +550,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust AgentAttache agent = findAttache(hostId); if (agent == null || !agent.forForward()) { if (isHostOwnerSwitched(host)) { - logger.debug("Host {} has switched to another management server, need to update agent map with a forwarding agent attache", hostId); - agent = createAttache(hostId); + logger.debug("Host {} has switched to another management server, need to update agent map with a forwarding agent attache", host); + agent = createAttache(host); } } if (agent == null) { @@ -712,12 +717,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override public void onManagementNodeLeft(final List nodeList, final long selfNodeId) { for (final ManagementServerHost vo : nodeList) { - logger.info("Marking hosts as disconnected on Management server {}", vo.getMsid()); + logger.info("Marking hosts as disconnected on Management server {}", vo); final long lastPing = (System.currentTimeMillis() >> 10) - mgmtServiceConf.getTimeout(); _hostDao.markHostsAsDisconnected(vo.getMsid(), lastPing); outOfBandManagementDao.expireServerOwnership(vo.getMsid()); haConfigDao.expireServerOwnership(vo.getMsid()); - logger.info("Deleting entries from op_host_transfer table for Management server {}", vo.getMsid()); + logger.info("Deleting entries from op_host_transfer table for Management server {}", vo); cleanupTransferMap(vo.getMsid()); } } @@ -744,7 +749,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust try { result = rebalanceHost(agentId, currentOwnerId, futureOwnerId); } catch (final Exception e) { - logger.warn("Unable to rebalance host id={}", agentId, e); + logger.warn("Unable to rebalance host id={} ({})", agentId, findAttache(agentId), e); } } return result; @@ -814,22 +819,24 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust List hostsToRebalance = new ArrayList(); for (final 
AgentLoadBalancerPlanner lbPlanner : _lbPlanners) { - hostsToRebalance = lbPlanner.getHostsToRebalance(node.getMsid(), avLoad); + hostsToRebalance = lbPlanner.getHostsToRebalance(node, avLoad); if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) { break; } - logger.debug("Agent load balancer planner " + lbPlanner.getName() + " found no hosts to be rebalanced from management server " + node.getMsid()); + logger.debug( + "Agent load balancer planner {} found no hosts to be rebalanced from management server {}", + lbPlanner.getName(), node); } if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) { - logger.debug("Found {} hosts to rebalance from management server {}", hostsToRebalance.size(), node.getMsid()); + logger.debug("Found {} hosts to rebalance from management server {}", hostsToRebalance.size(), node); for (final HostVO host : hostsToRebalance) { final long hostId = host.getId(); - logger.debug("Asking management server {} to give away host id={}", node.getMsid(), hostId); + logger.debug("Asking management server {} to give away host id={}", node, host); boolean result = true; if (_hostTransferDao.findById(hostId) != null) { - logger.warn("Somebody else is already rebalancing host id: {}", hostId); + logger.warn("Somebody else is already rebalancing host: {}", host); continue; } @@ -838,11 +845,11 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust transfer = _hostTransferDao.startAgentTransfering(hostId, node.getMsid(), _nodeId); final Answer[] answer = sendRebalanceCommand(node.getMsid(), hostId, node.getMsid(), _nodeId, Event.RequestAgentRebalance); if (answer == null) { - logger.warn("Failed to get host id={} from management server {}", hostId, node.getMsid()); + logger.warn("Failed to get host {} from management server {}", host, node); result = false; } } catch (final Exception ex) { - logger.warn("Failed to get host id={} from management server {}", hostId, node.getMsid(), ex); + logger.warn("Failed to 
get host {} from management server {}", host, node, ex); result = false; } finally { if (transfer != null) { @@ -857,7 +864,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } } } else { - logger.debug("Found no hosts to rebalance from the management server {}", node.getMsid()); + logger.debug("Found no hosts to rebalance from the management server {}", node); } } } @@ -902,7 +909,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust return null; } - logger.debug("Propagating agent change request event: {} to agent: {}", event.toString(), agentId); + logger.debug("Propagating agent change request event: {} to agent: {} ({})", event.toString(), agentId, findAttache(agentId)); final Command[] cmds = new Command[1]; cmds[0] = new ChangeAgentCommand(agentId, event); @@ -942,14 +949,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust final HostTransferMapVO transferMap = _hostTransferDao.findActiveHostTransferMapByHostId(hostId, new Date(cutTime.getTime() - rebalanceTimeOut)); if (transferMap == null) { - logger.debug("Timed out waiting for the host id={} to be ready to transfer, skipping rebalance for the host" + hostId); + logger.debug("Timed out waiting for the host id={} ({}) to be ready to transfer, skipping rebalance for the host", hostId, attache); iterator.remove(); _hostTransferDao.completeAgentTransfer(hostId); continue; } if (transferMap.getInitialOwner() != _nodeId || attache == null || attache.forForward()) { - logger.debug("Management server {} doesn't own host id={} any more, skipping rebalance for the host", _nodeId, hostId); + logger.debug("Management server {} doesn't own host id={} ({}) any more, skipping rebalance for the host", _nodeId, hostId, attache); iterator.remove(); _hostTransferDao.completeAgentTransfer(hostId); continue; @@ -957,7 +964,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
final ManagementServerHostVO ms = _mshostDao.findByMsid(transferMap.getFutureOwner()); if (ms != null && ms.getState() != ManagementServerHost.State.Up) { - logger.debug("Can't transfer host {} as it's future owner is not in UP state: {}, skipping rebalance for the host", hostId, ms); + logger.debug("Can't transfer host {} ({}) as its future owner is not in UP state: {}, skipping rebalance for the host", hostId, attache, ms); iterator.remove(); _hostTransferDao.completeAgentTransfer(hostId); continue; @@ -968,13 +975,13 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust try { _executor.execute(new RebalanceTask(hostId, transferMap.getInitialOwner(), transferMap.getFutureOwner())); } catch (final RejectedExecutionException ex) { - logger.warn("Failed to submit rebalance task for host id={}; postponing the execution", hostId); + logger.warn("Failed to submit rebalance task for host id={} ({}); postponing the execution", hostId, attache); continue; } } else { - logger.debug("Agent {} can't be transferred yet as its request queue size is {} and listener queue size is {}", - hostId, attache.getQueueSize(), attache.getNonRecurringListenersSize()); + logger.debug("Agent {} ({}) can't be transferred yet as its request queue size is {} and listener queue size is {}", + hostId, attache, attache.getQueueSize(), attache.getNonRecurringListenersSize()); } } } else { @@ -990,7 +997,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } private boolean setToWaitForRebalance(final long hostId, final long currentOwnerId, final long futureOwnerId) { - logger.debug("Adding agent {} to the list of agents to transfer", hostId); + logger.debug("Adding agent {} ({}) to the list of agents to transfer", hostId, findAttache(hostId)); synchronized (_agentToTransferIds) { return _agentToTransferIds.add(hostId); } @@ -1012,7 +1019,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } } catch
(final Exception ex) { - logger.warn("Host {} failed to connect to the management server {} as a part of rebalance process", hostId, futureOwnerId, ex); + logger.warn("Host {} ({}) failed to connect to the management server {} as a part of rebalance process", hostId, findAttache(hostId), futureOwnerId, ex); result = false; } @@ -1027,7 +1034,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } else if (futureOwnerId == _nodeId) { final HostVO host = _hostDao.findById(hostId); try { - logger.debug("Disconnecting host {}({}) as a part of rebalance process without notification", host.getId(), host.getName()); + logger.debug("Disconnecting host {} as a part of rebalance process without notification", host); final AgentAttache attache = findAttache(hostId); if (attache != null) { @@ -1035,21 +1042,21 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } if (result) { - logger.debug("Loading directly connected host {}({}) to the management server {} as a part of rebalance process", host.getId(), host.getName(), _nodeId); + logger.debug("Loading directly connected host {} to the management server {} as a part of rebalance process", host, _nodeId); result = loadDirectlyConnectedHost(host, true); } else { - logger.warn("Failed to disconnect {}({}) as a part of rebalance process without notification" + host.getId(), host.getName()); + logger.warn("Failed to disconnect {} as a part of rebalance process without notification", host); } } catch (final Exception ex) { - logger.warn("Failed to load directly connected host {}({}) to the management server {} a part of rebalance process without notification", host.getId(), host.getName(), _nodeId, ex); + logger.warn("Failed to load directly connected host {} to the management server {} a part of rebalance process without notification", host, _nodeId, ex); result = false; } if (result) { - logger.debug("Successfully loaded directly connected host {}({}) to the 
management server {} a part of rebalance process without notification", host.getId(), host.getName(), _nodeId); + logger.debug("Successfully loaded directly connected host {} to the management server {} a part of rebalance process without notification", host, _nodeId); } else { - logger.warn("Failed to load directly connected host {}({}) to the management server {} a part of rebalance process without notification", host.getId(), host.getName(), _nodeId); + logger.warn("Failed to load directly connected host {} to the management server {} a part of rebalance process without notification", host, _nodeId); } } @@ -1059,9 +1066,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust protected void finishRebalance(final long hostId, final long futureOwnerId, final Event event) { final boolean success = event == Event.RebalanceCompleted ? true : false; - logger.debug("Finishing rebalancing for the agent {} with event {}", hostId, event); final AgentAttache attache = findAttache(hostId); + logger.debug("Finishing rebalancing for the agent {} ({}) with event {}", hostId, attache, event); + if (attache == null || !(attache instanceof ClusteredAgentAttache)) { logger.debug("Unable to find forward attache for the host id={} assuming that the agent disconnected already", hostId); _hostTransferDao.completeAgentTransfer(hostId); @@ -1078,7 +1086,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust // 2) Get all transfer requests and route them to peer Request requestToTransfer = forwardAttache.getRequestToTransfer(); while (requestToTransfer != null) { - logger.debug("Forwarding request {} held in transfer attache {} from the management server {} to {}", requestToTransfer.getSequence(), hostId, _nodeId, futureOwnerId); + logger.debug("Forwarding request {} held in transfer attache [id: {}, uuid: {}, name: {}] from the management server {} to {}", + requestToTransfer.getSequence(), hostId, attache.getUuid(), 
attache.getName(), _nodeId, futureOwnerId); final boolean routeResult = routeToPeer(Long.toString(futureOwnerId), requestToTransfer.getBytes()); if (!routeResult) { logD(requestToTransfer.getBytes(), "Failed to route request to peer"); @@ -1087,23 +1096,25 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust requestToTransfer = forwardAttache.getRequestToTransfer(); } - logger.debug("Management server {} completed agent {} rebalance to {}", _nodeId, hostId, futureOwnerId); + logger.debug("Management server {} completed agent [id: {}, uuid: {}, name: {}] rebalance to {}", + _nodeId, hostId, attache.getUuid(), attache.getName(), futureOwnerId); } else { failRebalance(hostId); } - logger.debug("Management server {} completed agent {} rebalance", _nodeId, hostId); + logger.debug("Management server {} completed agent [id: {}, uuid: {}, name: {}] rebalance", _nodeId, hostId, attache.getUuid(), attache.getName()); _hostTransferDao.completeAgentTransfer(hostId); } protected void failRebalance(final long hostId) { + AgentAttache attache = findAttache(hostId); try { - logger.debug("Management server {} failed to rebalance agent {}", _nodeId, hostId); + logger.debug("Management server {} failed to rebalance agent {} ({})", _nodeId, hostId, attache); _hostTransferDao.completeAgentTransfer(hostId); handleDisconnectWithoutInvestigation(findAttache(hostId), Event.RebalanceFailed, true, true); } catch (final Exception ex) { - logger.warn("Failed to reconnect host id={} as a part of failed rebalance task cleanup", hostId); + logger.warn("Failed to reconnect host id={} ({}) as a part of failed rebalance task cleanup", hostId, attache); } } @@ -1119,20 +1130,20 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust final ClusteredDirectAgentAttache attache = (ClusteredDirectAgentAttache)_agents.get(hostId); if (attache != null && attache.getQueueSize() == 0 && attache.getNonRecurringListenersSize() == 0) { 
handleDisconnectWithoutInvestigation(attache, Event.StartAgentRebalance, true, true); - final ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)createAttache(hostId); + final ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)createAttache(host); if (forwardAttache == null) { - logger.warn("Unable to create a forward attache for the host {} as a part of rebalance process", hostId); + logger.warn("Unable to create a forward attache for the host {} as a part of rebalance process", host); return false; } - logger.debug("Putting agent id={} to transfer mode", hostId); + logger.debug("Putting agent {} to transfer mode", host); forwardAttache.setTransferMode(true); _agents.put(hostId, forwardAttache); } else { if (attache == null) { - logger.warn("Attache for the agent {} no longer exists on management server, can't start host rebalancing", hostId, _nodeId); + logger.warn("Attache for the agent {} no longer exists on management server {}, can't start host rebalancing", host, _nodeId); } else { - logger.warn("Attache for the agent {} has request queue size= {} and listener queue size {}, can't start host rebalancing", - hostId, attache.getQueueSize(), attache.getNonRecurringListenersSize()); + logger.warn("Attache for the agent {} has request queue size {} and listener queue size {}, can't start host rebalancing", + host, attache.getQueueSize(), attache.getNonRecurringListenersSize()); } return false; } @@ -1167,11 +1178,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override protected void runInContext() { + AgentAttache attache = findAttache(hostId); try { - logger.debug("Rebalancing host id={}", hostId); + logger.debug("Rebalancing host id={} ({})", hostId, attache); rebalanceHost(hostId, currentOwnerId, futureOwnerId); } catch (final Exception e) { - logger.warn("Unable to rebalance host id={}", hostId, e); + logger.warn("Unable to rebalance host id={} ({})", hostId, attache, e); } } } @@ -1260,7 +1272,7 @@
public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } else if (cmds.length == 1 && cmds[0] instanceof PropagateResourceEventCommand) { final PropagateResourceEventCommand cmd = (PropagateResourceEventCommand)cmds[0]; - logger.debug("Intercepting command to propagate event {} for host {}", cmd.getEvent().name(), cmd.getHostId()); + logger.debug("Intercepting command to propagate event {} for host {} ({})", () -> cmd.getEvent().name(), cmd::getHostId, () -> _hostDao.findById(cmd.getHostId())); boolean result = false; try { diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredDirectAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredDirectAgentAttache.java index ac1076a9ff0..e36ea6cedc1 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredDirectAgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredDirectAgentAttache.java @@ -26,8 +26,8 @@ import com.cloud.utils.exception.CloudRuntimeException; public class ClusteredDirectAgentAttache extends DirectAgentAttache implements Routable { private final long _nodeId; - public ClusteredDirectAgentAttache(ClusteredAgentManagerImpl agentMgr, long id, String name, long mgmtId, ServerResource resource, boolean maintenance) { - super(agentMgr, id, name, resource, maintenance); + public ClusteredDirectAgentAttache(ClusteredAgentManagerImpl agentMgr, long id, String uuid, String name, long mgmtId, ServerResource resource, boolean maintenance) { + super(agentMgr, id, uuid, name, resource, maintenance); _nodeId = mgmtId; } @@ -37,9 +37,9 @@ public class ClusteredDirectAgentAttache extends DirectAgentAttache implements R try { req = Request.parse(data); } catch (ClassNotFoundException e) { - throw new CloudRuntimeException("Unable to rout to an agent ", e); + throw new CloudRuntimeException("Unable to route to an agent ", e); } catch (UnsupportedVersionException e) { - throw new 
CloudRuntimeException("Unable to rout to an agent ", e); + throw new CloudRuntimeException("Unable to route to an agent ", e); } if (req instanceof Response) { diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java index c8e24301b29..523f98fd010 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java @@ -31,8 +31,8 @@ public class ConnectedAgentAttache extends AgentAttache { protected Link _link; - public ConnectedAgentAttache(final AgentManagerImpl agentMgr, final long id, final String name, final Link link, final boolean maintenance) { - super(agentMgr, id, name, maintenance); + public ConnectedAgentAttache(final AgentManagerImpl agentMgr, final long id, final String uuid, final String name, final Link link, final boolean maintenance) { + super(agentMgr, id, uuid, name, maintenance); _link = link; } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java index 927da34104f..07d5bf80393 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java @@ -51,8 +51,8 @@ public class DirectAgentAttache extends AgentAttache { AtomicInteger _outstandingTaskCount; AtomicInteger _outstandingCronTaskCount; - public DirectAgentAttache(AgentManagerImpl agentMgr, long id, String name, ServerResource resource, boolean maintenance) { - super(agentMgr, id, name, maintenance); + public DirectAgentAttache(AgentManagerImpl agentMgr, long id, String uuid,String name, ServerResource resource, boolean maintenance) { + super(agentMgr, id, uuid, name, maintenance); _resource = 
resource; _outstandingTaskCount = new AtomicInteger(0); _outstandingCronTaskCount = new AtomicInteger(0); @@ -60,7 +60,7 @@ public class DirectAgentAttache extends AgentAttache { @Override public void disconnect(Status state) { - logger.debug("Processing disconnect {}({})", _id, _name); + logger.debug("Processing disconnect [id: {}, uuid: {}, name: {}]", _id, _uuid, _name); for (ScheduledFuture future : _futures) { future.cancel(false); @@ -115,7 +115,7 @@ public class DirectAgentAttache extends AgentAttache { if (answers != null && answers[0] instanceof StartupAnswer) { StartupAnswer startup = (StartupAnswer)answers[0]; int interval = startup.getPingInterval(); - logger.info("StartupAnswer received {} Interval = {}", startup.getHostId(), interval); + logger.info("StartupAnswer received [id: {}, uuid: {}, name: {}, interval: {}]", startup.getHostId(), startup.getHostUuid(), startup.getHostName(), interval); _futures.add(_agentMgr.getCronJobPool().scheduleAtFixedRate(new PingTask(), interval, interval, TimeUnit.SECONDS)); } } @@ -126,7 +126,7 @@ public class DirectAgentAttache extends AgentAttache { assert _resource == null : "Come on now....If you're going to dabble in agent code, you better know how to close out our resources. 
Ever considered why there's a method called disconnect()?"; synchronized (this) { if (_resource != null) { - logger.warn("Lost attache for {}({})", _id, _name); + logger.warn("Lost attache for [id: {}, uuid: {}, name: {}]", _id, _uuid, _name); disconnect(Status.Alert); } } @@ -140,7 +140,8 @@ public class DirectAgentAttache extends AgentAttache { } private synchronized void scheduleFromQueue() { - logger.trace("Agent attache={}, task queue size={}, outstanding tasks={}", _id, tasks.size(), _outstandingTaskCount.get()); + logger.trace("Agent attache [id: {}, uuid: {}, name: {}], task queue size={}, outstanding tasks={}", + _id, _uuid, _name, tasks.size(), _outstandingTaskCount.get()); while (!tasks.isEmpty() && _outstandingTaskCount.get() < _agentMgr.getDirectAgentThreadCap()) { _outstandingTaskCount.incrementAndGet(); _agentMgr.getDirectAgentPool().execute(tasks.remove()); @@ -152,7 +153,9 @@ public class DirectAgentAttache extends AgentAttache { protected synchronized void runInContext() { try { if (_outstandingCronTaskCount.incrementAndGet() >= _agentMgr.getDirectAgentThreadCap()) { - logger.warn("PingTask execution for direct attache({}) has reached maximum outstanding limit({}), bailing out", _id, _agentMgr.getDirectAgentThreadCap()); + logger.warn( + "PingTask execution for direct attache [id: {}, uuid: {}, name: {}] has reached maximum outstanding limit({}), bailing out", + _id, _uuid, _name, _agentMgr.getDirectAgentThreadCap()); return; } @@ -167,21 +170,21 @@ public class DirectAgentAttache extends AgentAttache { } if (cmd == null) { - logger.warn("Unable to get current status on {}({})", _id, _name); + logger.warn("Unable to get current status on [id: {}, uuid: {}, name: {}]", _id, _uuid, _name); return; } if (cmd.getContextParam("logid") != null) { ThreadContext.put("logcontextid", cmd.getContextParam("logid")); } - logger.debug("Ping from {}({})", _id, _name); + logger.debug("Ping from [id: {}, uuid: {}, name: {}]", _id, _uuid, _name); long seq = _seq++; 
logger.trace("SeqA {}-{}: {}", _id, seq, new Request(_id, -1, cmd, false).toString()); _agentMgr.handleCommands(DirectAgentAttache.this, seq, new Command[] {cmd}); } else { - logger.debug("Unable to send ping because agent is disconnected {}", _id, _name); + logger.debug("Unable to send ping because agent is disconnected [id: {}, uuid: {}, name: {}]", _id, _uuid, _name); } } catch (Exception e) { logger.warn("Unable to complete the ping task", e); @@ -219,7 +222,9 @@ public class DirectAgentAttache extends AgentAttache { long seq = _req.getSequence(); try { if (_outstandingCronTaskCount.incrementAndGet() >= _agentMgr.getDirectAgentThreadCap()) { - logger.warn("CronTask execution for direct attache({}) has reached maximum outstanding limit({}), bailing out", _id, _agentMgr.getDirectAgentThreadCap()); + logger.warn( + "CronTask execution for direct attache [id: {}, uuid: {}, name: {}] has reached maximum outstanding limit({}), bailing out", + _id, _uuid, _name, _agentMgr.getDirectAgentThreadCap()); bailout(); return; } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/DummyAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/DummyAttache.java index 7ee524076bb..2f15e7af43c 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/DummyAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/DummyAttache.java @@ -22,8 +22,8 @@ import com.cloud.host.Status; public class DummyAttache extends AgentAttache { - public DummyAttache(AgentManagerImpl agentMgr, long id, String name, boolean maintenance) { - super(agentMgr, id, name, maintenance); + public DummyAttache(AgentManagerImpl agentMgr, long id, String uuid, String name, boolean maintenance) { + super(agentMgr, id, uuid, name, maintenance); } @Override diff --git a/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/AgentLoadBalancerPlanner.java 
b/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/AgentLoadBalancerPlanner.java index 7d139e5be14..e73776d134d 100644 --- a/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/AgentLoadBalancerPlanner.java +++ b/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/AgentLoadBalancerPlanner.java @@ -18,11 +18,12 @@ package com.cloud.cluster.agentlb; import java.util.List; +import com.cloud.cluster.ManagementServerHostVO; import com.cloud.host.HostVO; import com.cloud.utils.component.Adapter; public interface AgentLoadBalancerPlanner extends Adapter { - List getHostsToRebalance(long msId, int avLoad); + List getHostsToRebalance(ManagementServerHostVO ms, int avLoad); } diff --git a/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java b/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java index 641ae441480..5b05b4df042 100644 --- a/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java +++ b/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java @@ -26,6 +26,7 @@ import java.util.Map; import javax.inject.Inject; +import com.cloud.cluster.ManagementServerHostVO; import org.springframework.stereotype.Component; import com.cloud.host.Host; @@ -43,15 +44,17 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements HostDao _hostDao = null; @Override - public List getHostsToRebalance(long msId, int avLoad) { + public List getHostsToRebalance(ManagementServerHostVO ms, int avLoad) { + long msId = ms.getMsid(); QueryBuilder sc = QueryBuilder.create(HostVO.class); sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing); sc.and(sc.entity().getManagementServerId(), Op.EQ, msId); List allHosts = sc.list(); if (allHosts.size() <= avLoad) { - logger.debug("Agent load = " + allHosts.size() + " for management server " + msId + " doesn't 
exceed average system agent load = " + avLoad + - "; so it doesn't participate in agent rebalancing process"); + logger.debug("Agent load = {} for management server {} doesn't exceed average " + + "system agent load = {}; so it doesn't participate in agent rebalancing process", + allHosts.size(), ms, avLoad); return null; } @@ -62,8 +65,9 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements List directHosts = sc.list(); if (directHosts.isEmpty()) { - logger.debug("No direct agents in status " + Status.Up + " exist for the management server " + msId + - "; so it doesn't participate in agent rebalancing process"); + logger.debug("No direct agents in status {} exist for the management server " + + "{}; so it doesn't participate in agent rebalancing process", + Status.Up, ms); return null; } @@ -88,8 +92,9 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements int hostsLeft = directHosts.size(); List hostsToReturn = new ArrayList(); - logger.debug("Management server " + msId + " can give away " + hostsToGive + " as it currently owns " + allHosts.size() + - " and the average agent load in the system is " + avLoad + "; finalyzing list of hosts to give away..."); + logger.debug("Management server {} can give away {} as it currently owns {} and the " + + "average agent load in the system is {}; finalyzing list of hosts to give away...", + ms, hostsToGive, allHosts.size(), avLoad); for (Long cluster : hostToClusterMap.keySet()) { List hostsInCluster = hostToClusterMap.get(cluster); hostsLeft = hostsLeft - hostsInCluster.size(); @@ -113,7 +118,7 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements } } - logger.debug("Management server " + msId + " is ready to give away " + hostsToReturn.size() + " hosts"); + logger.debug("Management server {} is ready to give away {} hosts", ms, hostsToReturn.size()); return hostsToReturn; } diff --git 
a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 7c107ed6f54..a8b0130bdbc 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -480,7 +480,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final LinkedHashMap> auxiliaryNetworks, final DeploymentPlan plan, final HypervisorType hyperType, final Map> extraDhcpOptions, final Map datadiskTemplateToDiskOfferingMap) throws InsufficientCapacityException { - logger.info("allocating virtual machine from template:{} with hostname:{} and {} networks", template.getUuid(), vmInstanceName, auxiliaryNetworks.size()); + logger.info("allocating virtual machine from template: {} with hostname: {} and {} networks", template, vmInstanceName, auxiliaryNetworks.size()); VMInstanceVO persistedVm = null; try { final VMInstanceVO vm = _vmDao.findVMByInstanceName(vmInstanceName); @@ -1196,8 +1196,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final Long clusterIdSpecified = planToDeploy.getClusterId(); if (clusterIdSpecified != null && rootVolClusterId != null) { if (!rootVolClusterId.equals(clusterIdSpecified)) { - logger.debug("Cannot satisfy the deployment plan passed in since the ready Root volume is in different cluster. volume's cluster: " + - rootVolClusterId + ", cluster specified: " + clusterIdSpecified); + logger.debug("Cannot satisfy the deployment plan passed in since " + + "the ready Root volume is in different cluster. 
volume's cluster: {}, cluster specified: {}", + () -> _clusterDao.findById(rootVolClusterId), () -> _clusterDao.findById(clusterIdSpecified)); throw new ResourceUnavailableException( "Root volume is ready in different cluster, Deployment plan provided cannot be satisfied, unable to create a deployment for " + vm, Cluster.class, clusterIdSpecified); @@ -1320,8 +1321,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac syncDiskChainChange(startAnswer); if (!changeState(vm, Event.OperationSucceeded, destHostId, work, Step.Done)) { - logger.error("Unable to transition to a new state. VM uuid: {}, VM oldstate: {}, Event: {}", vm.getUuid(), vm.getState(), Event.OperationSucceeded); - throw new ConcurrentOperationException("Failed to deploy VM"+ vm.getUuid()); + logger.error("Unable to transition to a new state. VM uuid: {}, VM oldstate: {}, Event: {}", vm, vm.getState(), Event.OperationSucceeded); + throw new ConcurrentOperationException(String.format("Failed to deploy VM %s", vm)); } final GPUDeviceTO gpuDevice = startAnswer.getVirtualMachine().getGpuDevice(); @@ -1348,10 +1349,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } return; } catch (final Exception e) { - logger.error("Retrying after catching exception while trying to secure agent for systemvm id={}", vm.getId(), e); + logger.error("Retrying after catching exception while trying to secure agent for systemvm {}", vm, e); } } - throw new CloudRuntimeException("Failed to setup and secure agent for systemvm id=" + vm.getId()); + throw new CloudRuntimeException(String.format("Failed to setup and secure agent for systemvm %s", vm)); } return; } else { @@ -1390,7 +1391,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } catch (OperationTimedoutException e) { - logger.debug("Unable to send the start command to host {} failed to start VM: {}", dest.getHost(), vm.getUuid()); + logger.debug("Unable to send the 
start command to host {} failed to start VM: {}", dest.getHost(), vm); if (e.isActive()) { _haMgr.scheduleStop(vm, destHostId, WorkType.CheckStop); } @@ -1745,7 +1746,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final List pendingWorkJobs = _workJobDao.listPendingWorkJobs(VirtualMachine.Type.Instance, vm.getId()); if (CollectionUtils.isNotEmpty(pendingWorkJobs) || _haMgr.hasPendingHaWork(vm.getId())) { - String msg = "There are pending jobs or HA tasks working on the VM with id: " + vm.getId() + ", can't unmanage the VM."; + String msg = String.format("There are pending jobs or HA tasks working on the VM: %s, can't unmanage the VM.", vm); logger.info(msg); throw new ConcurrentOperationException(msg); } @@ -2124,8 +2125,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } else { HostVO host = _hostDao.findById(hostId); if (!cleanUpEvenIfUnableToStop && vm.getState() == State.Running && host.getResourceState() == ResourceState.PrepareForMaintenance) { - logger.debug("Host is in PrepareForMaintenance state - Stop VM operation on the VM id: {} is not allowed", vm.getId()); - throw new CloudRuntimeException("Stop VM operation on the VM id: " + vm.getId() + " is not allowed as host is preparing for maintenance mode"); + logger.debug("Host is in PrepareForMaintenance state - Stop VM operation on the VM: {} is not allowed", vm); + throw new CloudRuntimeException(String.format("Stop VM operation on the VM %s is not allowed as host is preparing for maintenance mode", vm)); } } @@ -2509,7 +2510,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac List volumes = _volsDao.findUsableVolumesForInstance(vm.getId()); logger.debug("Found {} volumes for VM {}(uuid:{}, id:{})", results.size(), vm.getInstanceName(), vm.getUuid(), vm.getId()); for (VolumeObjectTO result : results ) { - logger.debug("Updating volume ({}) with path '{}' on pool '{}'", result.getId(), 
result.getPath(), result.getDataStoreUuid()); + logger.debug("Updating volume ({}) with path '{}' on pool '{}'", result.getUuid(), result.getPath(), result.getDataStoreUuid()); VolumeVO volume = _volsDao.findById(result.getId()); StoragePool pool = _storagePoolDao.findPoolByUUID(result.getDataStoreUuid()); if (volume == null || pool == null) { @@ -2660,14 +2661,15 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private void removeStaleVmFromSource(VMInstanceVO vm, HostVO srcHost) { logger.debug("Since VM's storage was successfully migrated across VMware Datacenters, unregistering VM: {} from source host: {}", - vm.getInstanceName(), srcHost.getId()); + vm, srcHost); final UnregisterVMCommand uvc = new UnregisterVMCommand(vm.getInstanceName()); uvc.setCleanupVmFiles(true); try { _agentMgr.send(srcHost.getId(), uvc); } catch (AgentUnavailableException | OperationTimedoutException e) { - throw new CloudRuntimeException("Failed to unregister VM: " + vm.getInstanceName() + " from source host: " + srcHost.getId() + - " after successfully migrating VM's storage across VMware Datacenters", e); + throw new CloudRuntimeException(String.format( + "Failed to unregister VM: %s from source host: %s after successfully migrating VM's storage across VMware Datacenters", + vm, srcHost), e); } } @@ -2722,10 +2724,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac for (final VolumeVO volume : volumes) { if (!_storagePoolDao.findById(volume.getPoolId()).getScope().equals(ScopeType.ZONE)) { logger.info("Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: {}", - dest.getHost().getId()); - throw new CloudRuntimeException( - "Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: " - + dest.getHost().getId()); + dest.getHost()); + throw new 
CloudRuntimeException(String.format( + "Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: %s", + dest.getHost())); } } } @@ -2852,13 +2854,13 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { _agentMgr.send(srcHostId, new Commands(cleanup(vm, dpdkInterfaceMapping)), null); } catch (final AgentUnavailableException e) { - logger.error("AgentUnavailableException while cleanup on source host: {}", srcHostId, e); + logger.error("AgentUnavailableException while cleanup on source host: {}", fromHost, e); } cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true); throw new CloudRuntimeException("Unable to complete migration for " + vm); } } catch (final OperationTimedoutException e) { - logger.warn("Error while checking the vm {} on host {}", vm, dstHostId, e); + logger.warn("Error while checking the vm {} on host {}", vm, dest.getHost(), e); } migrated = true; } finally { @@ -3302,7 +3304,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { _agentMgr.send(srcHostId, new Commands(cleanup(vm.getInstanceName())), null); } catch (final AgentUnavailableException e) { - logger.error("AgentUnavailableException while cleanup on source host: {}", srcHostId, e); + logger.error("AgentUnavailableException while cleanup on source host: {}", srcHost, e); } cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true); throw new CloudRuntimeException("VM not found on destination host. Unable to complete migration for " + vm); @@ -3834,9 +3836,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac return; } - logger.debug("Received startup command from hypervisor host. host id: {}", agent.getId()); + logger.debug("Received startup command from hypervisor host. 
host: {}", agent); - _syncMgr.resetHostSyncState(agent.getId()); + _syncMgr.resetHostSyncState(agent); if (forRebalance) { logger.debug("Not processing listener {} as connect happens on rebalance process", this); @@ -3851,7 +3853,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final long seq_no = _agentMgr.send(agentId, new Commands(syncVMMetaDataCmd), this); logger.debug("Cluster VM metadata sync started with jobid {}", seq_no); } catch (final AgentUnavailableException e) { - logger.fatal("The Cluster VM metadata sync process failed for cluster id {} with {}", clusterId, e); + logger.fatal("The Cluster VM metadata sync process failed for cluster {} with {}", _clusterDao.findById(clusterId), e); } } } @@ -4224,10 +4226,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac logger.debug("Not need to remove the vm {} from network {} as the vm doesn't have nic in this network.", vm, network); return true; } - throw new ConcurrentOperationException("Unable to lock nic " + nic.getId()); + throw new ConcurrentOperationException(String.format("Unable to lock nic %s", nic)); } - logger.debug("Lock is acquired for nic id {} as a part of remove vm {} from network {}", lock.getId(), vm, network); + logger.debug("Lock is acquired for nic {} as a part of remove vm {} from network {}", lock, vm, network); try { final NicProfile nicProfile = @@ -4256,7 +4258,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac return true; } finally { _nicsDao.releaseFromLockTable(lock.getId()); - logger.debug("Lock is released for nic id {} as a part of remove vm {} from network {}", lock.getId(), vm, network); + logger.debug("Lock is released for nic {} as a part of remove vm {} from network {}", lock, vm, network); } } @@ -4348,9 +4350,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac vm.getServiceOfferingId(); final long dstHostId = dest.getHost().getId(); 
final Host fromHost = _hostDao.findById(srcHostId); - Host srcHost = _hostDao.findById(srcHostId); if (fromHost == null) { - String logMessageUnableToFindHost = String.format("Unable to find host to migrate from %s.", srcHost); + String logMessageUnableToFindHost = String.format("Unable to find host to migrate from %s.", srcHostId); logger.info(logMessageUnableToFindHost); throw new CloudRuntimeException(logMessageUnableToFindHost); } @@ -4359,7 +4360,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac long destHostClusterId = dest.getCluster().getId(); long fromHostClusterId = fromHost.getClusterId(); if (fromHostClusterId != destHostClusterId) { - String logMessageHostsOnDifferentCluster = String.format("Source and destination host are not in same cluster, unable to migrate to %s", srcHost); + String logMessageHostsOnDifferentCluster = String.format("Source and destination host are not in same cluster, unable to migrate to %s", fromHost); logger.info(logMessageHostsOnDifferentCluster); throw new CloudRuntimeException(logMessageHostsOnDifferentCluster); } @@ -4406,7 +4407,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (pfma == null || !pfma.getResult()) { final String details = pfma != null ? 
pfma.getDetails() : "null answer returned"; pfma = null; - throw new AgentUnavailableException(String.format("Unable to prepare for migration to destination host [%s] due to [%s].", dstHostId, details), dstHostId); + throw new AgentUnavailableException(String.format("Unable to prepare for migration to destination host [%s] due to [%s].", dest.getHost(), details), dstHostId); } } catch (final OperationTimedoutException e1) { throw new AgentUnavailableException("Operation timed out", dstHostId); @@ -4466,7 +4467,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { _agentMgr.send(srcHostId, new Commands(cleanup(vm.getInstanceName())), null); } catch (final AgentUnavailableException e) { - logger.error("Unable to cleanup source host [{}] due to [{}].", srcHostId, e.getMessage(), e); + logger.error("Unable to cleanup source host [{}] due to [{}].", fromHost, e.getMessage(), e); } cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true); throw new CloudRuntimeException("Unable to complete migration for " + vm); @@ -4801,7 +4802,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac logger.warn("VM {} no longer exists when processing VM state report.", vmId); } } else { - logger.info("There is pending job or HA tasks working on the VM. vm id: {}, postpone power-change report by resetting power-change counters.", vmId ); + logger.info("There is pending job or HA tasks working on the VM. 
vm: {}, postpone power-change report by resetting power-change counters.", () -> _vmDao.findById(vmId)); _vmDao.resetVmPowerStateTracking(vmId); } } @@ -4842,7 +4843,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac case Running: try { if (vm.getHostId() != null && !vm.getHostId().equals(vm.getPowerHostId())) { - logger.info("Detected out of band VM migration from host " + vm.getHostId() + " to host " + vm.getPowerHostId()); + logger.info("Detected out of band VM migration from host {} to host {}", () -> _hostDao.findById(vm.getHostId()), () -> _hostDao.findById(vm.getPowerHostId())); } stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId()); } catch (final NoTransitionException e) { @@ -4871,22 +4872,22 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac case Destroyed: case Expunging: - logger.info("Receive power on report when VM is in destroyed or expunging state. vm: {}, state: {}.", vm.getId(), vm.getState()); + logger.info("Receive power on report when VM is in destroyed or expunging state. vm: {}, state: {}.", vm, vm.getState()); break; case Migrating: - logger.info("VM {} is at {} and we received a power-on report while there is no pending jobs on it.", vm.getInstanceName(), vm.getState()); + logger.info("VM {} is at {} and we received a power-on report while there is no pending jobs on it.", vm, vm.getState()); try { stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId()); } catch (final NoTransitionException e) { logger.warn("Unexpected VM state transition exception, race-condition?", e); } - logger.info("VM {} is sync-ed to at Running state according to power-on report from hypervisor.", vm.getInstanceName()); + logger.info("VM {} is sync-ed to at Running state according to power-on report from hypervisor.", vm); break; case Error: default: - logger.info("Receive power on report when VM is in error or unexpected state. 
vm: {}, state: {}.", vm.getId(), vm.getState()); + logger.info("Receive power on report when VM is in error or unexpected state. vm: {}, state: {}.", vm, vm.getState()); break; } } @@ -4901,16 +4902,16 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac EventTypes.EVENT_VM_STOP, "Out of band VM power off", vm.getId(), getApiCommandResourceTypeForVm(vm).toString()); case Migrating: logger.info("VM {} is at {} and we received a {} report while there is no pending jobs on it" - , vm.getInstanceName(), vm.getState(), vm.getPowerState()); + , vm, vm.getState(), vm.getPowerState()); if((HighAvailabilityManager.ForceHA.value() || vm.isHaEnabled()) && vm.getState() == State.Running && HaVmRestartHostUp.value() && vm.getHypervisorType() != HypervisorType.VMware && vm.getHypervisorType() != HypervisorType.Hyperv) { - logger.info("Detected out-of-band stop of a HA enabled VM {}, will schedule restart.", vm.getInstanceName()); + logger.info("Detected out-of-band stop of a HA enabled VM {}, will schedule restart.", vm); if (!_haMgr.hasPendingHaWork(vm.getId())) { _haMgr.scheduleRestart(vm, true); } else { - logger.info("VM {} already has a pending HA task working on it.", vm.getInstanceName()); + logger.info("VM {} already has a pending HA task working on it.", vm); } return; } @@ -4937,10 +4938,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SYNC, vm.getDataCenterId(), vm.getPodIdToDeployIn(), - VM_SYNC_ALERT_SUBJECT, "VM " + vm.getHostName() + "(" + vm.getInstanceName() + ") state is sync-ed (" + vm.getState() - + " -> Stopped) from out-of-context transition."); + VM_SYNC_ALERT_SUBJECT, String.format("VM %s(%s) state is sync-ed (%s -> Stopped) from out-of-context transition.", + vm.getHostName(), vm, vm.getState())); - logger.info("VM {} is sync-ed to at Stopped state according to power-off report from hypervisor.", vm.getInstanceName()); + 
logger.info("VM {} is sync-ed to at Stopped state according to power-off report from hypervisor.", vm); break; @@ -4983,8 +4984,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final VMInstanceVO vm = _vmDao.findById(vmId); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SYNC, vm.getDataCenterId(), vm.getPodIdToDeployIn(), - VM_SYNC_ALERT_SUBJECT, "VM " + vm.getHostName() + "(" + vm.getInstanceName() + ") is stuck in " + vm.getState() - + " state and its host is unreachable for too long"); + VM_SYNC_ALERT_SUBJECT, String.format("VM %s(%s) is stuck in %s state and its host is unreachable for too long", + vm.getHostName(), vm, vm.getState())); } } @@ -5502,7 +5503,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { orchestrateMigrateAway(vm.getUuid(), work.getSrcHostId(), null); } catch (final InsufficientServerCapacityException e) { - logger.warn("Failed to deploy vm {} with original planner, sending HAPlanner.", vm.getId(), e); + logger.warn("Failed to deploy vm {} with original planner, sending HAPlanner.", vm, e); orchestrateMigrateAway(vm.getUuid(), work.getSrcHostId(), _haMgr.getHAPlanner()); } @@ -5794,18 +5795,20 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac @Override public Pair findClusterAndHostIdForVm(VirtualMachine vm, boolean skipCurrentHostForStartingVm) { Long hostId = null; + Host host = null; if (!skipCurrentHostForStartingVm || !State.Starting.equals(vm.getState())) { hostId = vm.getHostId(); } Long clusterId = null; - if(hostId == null) { - hostId = vm.getLastHostId(); - logger.debug("host id is null, using last host id {}", hostId); - } if (hostId == null) { - return findClusterAndHostIdForVmFromVolumes(vm.getId()); + if (vm.getLastHostId() == null) { + return findClusterAndHostIdForVmFromVolumes(vm.getId()); + } + hostId = vm.getLastHostId(); + host = _hostDao.findById(hostId); + logger.debug("host id is null, using last host 
{} with id {}", host, hostId); } - HostVO host = _hostDao.findById(hostId); + host = host == null ? _hostDao.findById(hostId) : host; if (host != null) { clusterId = host.getClusterId(); } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSync.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSync.java index b2a48a026a3..0f399cf4381 100644 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSync.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSync.java @@ -19,15 +19,14 @@ package com.cloud.vm; import java.util.Map; import com.cloud.agent.api.HostVmStateReportEntry; +import com.cloud.host.Host; public interface VirtualMachinePowerStateSync { - void resetHostSyncState(long hostId); + void resetHostSyncState(Host hostId); void processHostVmStateReport(long hostId, Map report); // to adapt legacy ping report void processHostVmStatePingReport(long hostId, Map report, boolean force); - - Map convertVmStateReport(Map states); } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java index 4c89a75d215..94dddfdf18a 100644 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java @@ -24,6 +24,10 @@ import java.util.Map; import javax.inject.Inject; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.utils.Pair; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.PublishScope; import org.apache.logging.log4j.Logger; @@ -40,54 +44,57 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat @Inject MessageBus _messageBus; @Inject VMInstanceDao _instanceDao; 
+ @Inject HostDao hostDao; @Inject ManagementServiceConfiguration mgmtServiceConf; public VirtualMachinePowerStateSyncImpl() { } @Override - public void resetHostSyncState(long hostId) { - logger.info("Reset VM power state sync for host: {}.", hostId); - _instanceDao.resetHostPowerStateTracking(hostId); + public void resetHostSyncState(Host host) { + logger.info("Reset VM power state sync for host: {}", host); + _instanceDao.resetHostPowerStateTracking(host.getId()); } @Override public void processHostVmStateReport(long hostId, Map report) { - logger.debug("Process host VM state report. host: {}.", hostId); + HostVO host = hostDao.findById(hostId); + logger.debug("Process host VM state report. host: {}", host); - Map translatedInfo = convertVmStateReport(report); - processReport(hostId, translatedInfo, false); + Map> translatedInfo = convertVmStateReport(report); + processReport(host, translatedInfo, false); } @Override public void processHostVmStatePingReport(long hostId, Map report, boolean force) { - logger.debug("Process host VM state report from ping process. host: {}.", hostId); + HostVO host = hostDao.findById(hostId); + logger.debug("Process host VM state report from ping process. host: {}", host); - Map translatedInfo = convertVmStateReport(report); - processReport(hostId, translatedInfo, force); + Map> translatedInfo = convertVmStateReport(report); + processReport(host, translatedInfo, force); } - private void processReport(long hostId, Map translatedInfo, boolean force) { + private void processReport(HostVO host, Map> translatedInfo, boolean force) { - logger.debug("Process VM state report. host: {}, number of records in report: {}.", hostId, translatedInfo.size()); + logger.debug("Process VM state report. host: {}, number of records in report: {}.", host, translatedInfo.size()); - for (Map.Entry entry : translatedInfo.entrySet()) { + for (Map.Entry> entry : translatedInfo.entrySet()) { - logger.debug("VM state report. 
host: {}, vm id: {}, power state: {}.", hostId, entry.getKey(), entry.getValue()); + logger.debug("VM state report. host: {}, vm: {}, power state: {}", host, entry.getValue().second(), entry.getValue().first()); - if (_instanceDao.updatePowerState(entry.getKey(), hostId, entry.getValue(), DateUtil.currentGMTTime())) { - logger.debug("VM state report is updated. host: {}, vm id: {}, power state: {}.", hostId, entry.getKey(), entry.getValue()); + if (_instanceDao.updatePowerState(entry.getKey(), host.getId(), entry.getValue().first(), DateUtil.currentGMTTime())) { + logger.debug("VM state report is updated. host: {}, vm: {}, power state: {}", host, entry.getValue().second(), entry.getValue().first()); _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, entry.getKey()); } else { - logger.trace("VM power state does not change, skip DB writing. vm id: {}.", entry.getKey()); + logger.trace("VM power state does not change, skip DB writing. vm: {}", entry.getValue().second()); } } // any state outdates should be checked against the time before this list was retrieved Date startTime = DateUtil.currentGMTTime(); // for all running/stopping VMs, we provide monitoring of missing report - List vmsThatAreMissingReport = _instanceDao.findByHostInStates(hostId, VirtualMachine.State.Running, + List vmsThatAreMissingReport = _instanceDao.findByHostInStates(host.getId(), VirtualMachine.State.Running, VirtualMachine.State.Stopping, VirtualMachine.State.Starting); java.util.Iterator it = vmsThatAreMissingReport.iterator(); while (it.hasNext()) { @@ -99,7 +106,7 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat // here we need to be wary of out of band migration as opposed to other, more unexpected state changes if (vmsThatAreMissingReport.size() > 0) { Date currentTime = DateUtil.currentGMTTime(); - logger.debug("Run missing VM report. 
current time: {}", currentTime.getTime()); + logger.debug("Run missing VM report for host {}. current time: {}", host, currentTime.getTime()); // 2 times of sync-update interval for graceful period long milliSecondsGracefullPeriod = mgmtServiceConf.getPingInterval() * 2000L; @@ -109,60 +116,55 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat // Make sure powerState is up to date for missing VMs try { if (!force && !_instanceDao.isPowerStateUpToDate(instance.getId())) { - logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM id: {}.", instance.getId()); + logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM: {}", instance); _instanceDao.resetVmPowerStateTracking(instance.getId()); continue; } } catch (CloudRuntimeException e) { - logger.warn("Checked for missing powerstate of a none existing vm", e); + logger.warn("Checked for missing powerstate of a non-existing vm {}", instance, e); continue; } Date vmStateUpdateTime = instance.getPowerStateUpdateTime(); if (vmStateUpdateTime == null) { - logger.warn("VM power state update time is null, falling back to update time for vm id: {}.", instance.getId()); + logger.warn("VM power state update time is null, falling back to update time for vm: {}", instance); vmStateUpdateTime = instance.getUpdateTime(); if (vmStateUpdateTime == null) { - logger.warn("VM update time is null, falling back to creation time for vm id: {}", instance.getId()); + logger.warn("VM update time is null, falling back to creation time for vm: {}", instance); vmStateUpdateTime = instance.getCreated(); } } String lastTime = new SimpleDateFormat("yyyy/MM/dd'T'HH:mm:ss.SSS'Z'").format(vmStateUpdateTime); - logger.debug("Detected missing VM. 
host: {}, vm id: {}({}), power state: {}, last state update: {}" - , hostId - , instance.getId() - , instance.getUuid() - , VirtualMachine.PowerState.PowerReportMissing - , lastTime); + logger.debug("Detected missing VM. host: {}, vm: {}, power state: {}, last state update: {}", + host, instance, VirtualMachine.PowerState.PowerReportMissing, lastTime); long milliSecondsSinceLastStateUpdate = currentTime.getTime() - vmStateUpdateTime.getTime(); if (force || milliSecondsSinceLastStateUpdate > milliSecondsGracefullPeriod) { - logger.debug("vm id: {} - time since last state update({}ms) has passed graceful period.", instance.getId(), milliSecondsSinceLastStateUpdate); + logger.debug("vm: {} - time since last state update({}ms) has passed graceful period", instance, milliSecondsSinceLastStateUpdate); // this is were a race condition might have happened if we don't re-fetch the instance; // between the startime of this job and the currentTime of this missing-branch // an update might have occurred that we should not override in case of out of band migration - if (_instanceDao.updatePowerState(instance.getId(), hostId, VirtualMachine.PowerState.PowerReportMissing, startTime)) { - logger.debug("VM state report is updated. host: {}, vm id: {}, power state: PowerReportMissing.", hostId, instance.getId()); + if (_instanceDao.updatePowerState(instance.getId(), host.getId(), VirtualMachine.PowerState.PowerReportMissing, startTime)) { + logger.debug("VM state report is updated. host: {}, vm: {}, power state: PowerReportMissing ", host, instance); _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, instance.getId()); } else { - logger.debug("VM power state does not change, skip DB writing. vm id: {}", instance.getId()); + logger.debug("VM power state does not change, skip DB writing. 
vm: {}", instance); } } else { - logger.debug("vm id: {} - time since last state update({}ms) has not passed graceful period yet.", instance.getId(), milliSecondsSinceLastStateUpdate); + logger.debug("vm: {} - time since last state update({} ms) has not passed graceful period yet", instance, milliSecondsSinceLastStateUpdate); } } } - logger.debug("Done with process of VM state report. host: {}", hostId); + logger.debug("Done with process of VM state report. host: {}", host); } - @Override - public Map convertVmStateReport(Map states) { - final HashMap map = new HashMap(); + public Map> convertVmStateReport(Map states) { + final HashMap> map = new HashMap<>(); if (states == null) { return map; } @@ -170,9 +172,9 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat for (Map.Entry entry : states.entrySet()) { VMInstanceVO vm = findVM(entry.getKey()); if (vm != null) { - map.put(vm.getId(), entry.getValue().getState()); + map.put(vm.getId(), new Pair<>(entry.getValue().getState(), vm)); } else { - logger.debug("Unable to find matched VM in CloudStack DB. name: {}", entry.getKey()); + logger.debug("Unable to find matched VM in CloudStack DB. 
name: {} powerstate: {}", entry.getKey(), entry.getValue()); } } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VmWorkJobWakeupDispatcher.java b/engine/orchestration/src/main/java/com/cloud/vm/VmWorkJobWakeupDispatcher.java index b7c82ce5c21..1b050ffd9de 100644 --- a/engine/orchestration/src/main/java/com/cloud/vm/VmWorkJobWakeupDispatcher.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VmWorkJobWakeupDispatcher.java @@ -67,8 +67,8 @@ public class VmWorkJobWakeupDispatcher extends AdapterBase implements AsyncJobDi try { List joinRecords = _joinMapDao.listJoinRecords(job.getId()); if (joinRecords.size() != 1) { - logger.warn("AsyncJob-" + job.getId() - + " received wakeup call with un-supported joining job number: " + joinRecords.size()); + logger.warn("AsyncJob-{} ({}) received wakeup call with un-supported " + + "joining job number: {}", job.getId(), job, joinRecords.size()); // if we fail wakeup-execution for any reason, avoid release sync-source if there is any job.setSyncSource(null); @@ -82,7 +82,7 @@ public class VmWorkJobWakeupDispatcher extends AdapterBase implements AsyncJobDi try { workClz = Class.forName(job.getCmd()); } catch (ClassNotFoundException e) { - logger.error("VM work class " + job.getCmd() + " is not found", e); + logger.error("VM work class {} for job {} is not found", job.getCmd(), job, e); return; } @@ -103,14 +103,13 @@ public class VmWorkJobWakeupDispatcher extends AdapterBase implements AsyncJobDi handler.invoke(_vmMgr); } else { assert (false); - logger.error("Unable to find wakeup handler " + joinRecord.getWakeupHandler() + - " when waking up job-" + job.getId()); + logger.error("Unable to find wakeup handler {} when waking up job-{} ({})", joinRecord.getWakeupHandler(), job.getId(), job); } } finally { CallContext.unregister(); } } catch (Throwable e) { - logger.warn("Unexpected exception in waking up job-" + job.getId()); + logger.warn("Unexpected exception in waking up job-{} ({})", job.getId(), job); // if 
we fail wakeup-execution for any reason, avoid release sync-source if there is any job.setSyncSource(null); diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/DataCenterResourceManagerImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/DataCenterResourceManagerImpl.java index 41366f73a01..e48481324df 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/DataCenterResourceManagerImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/DataCenterResourceManagerImpl.java @@ -96,7 +96,7 @@ public class DataCenterResourceManagerImpl implements DataCenterResourceManager public EngineClusterVO loadCluster(String uuid) { EngineClusterVO cluster = _clusterDao.findByUuid(uuid); if (cluster == null) { - throw new InvalidParameterValueException("Pod does not exist"); + throw new InvalidParameterValueException("Cluster does not exist"); } return cluster; } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java index c00d939b3df..19b0e773cd0 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java @@ -29,6 +29,7 @@ import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEnti import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEntity.State.Event; import org.apache.cloudstack.util.CPUArchConverter; import org.apache.cloudstack.util.HypervisorTypeConverter; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import 
javax.persistence.Convert; @@ -264,4 +265,11 @@ public class EngineClusterVO implements EngineCluster, Identity { public PartitionType partitionType() { return PartitionType.Cluster; } + + @Override + public String toString() { + return String.format("EngineCluster %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineDataCenterVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineDataCenterVO.java index 57382530f40..5f1203c024a 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineDataCenterVO.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineDataCenterVO.java @@ -43,6 +43,7 @@ import com.cloud.org.Grouping; import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.GenericDao; import com.cloud.utils.db.StateMachine; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "data_center") @@ -523,4 +524,11 @@ public class EngineDataCenterVO implements EngineDataCenter, Identity { public DataCenter.Type getType() { return type; } + + @Override + public String toString() { + return String.format("EngineDataCenter %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java index 684b882fe8a..95931d5b72d 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java +++ 
b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java @@ -38,6 +38,7 @@ import com.cloud.org.Grouping; import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.GenericDao; import com.cloud.utils.db.StateMachine; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "host_pod_ref") @@ -246,4 +247,11 @@ public class EngineHostPodVO implements EnginePod, Identity { public State getState() { return state; } + + @Override + public String toString() { + return String.format("EngineHostPod %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java index d804f079e17..053d9ac218e 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java @@ -53,6 +53,7 @@ import com.cloud.utils.db.GenericDao; import com.cloud.utils.db.StateMachine; import org.apache.cloudstack.util.CPUArchConverter; import org.apache.cloudstack.util.HypervisorTypeConverter; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "host") @@ -697,7 +698,9 @@ public class EngineHostVO implements EngineHost, Identity { @Override public String toString() { - return new StringBuilder("Host[").append("-").append(id).append("-").append(type).append("]").toString(); + return String.format("EngineHost %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "type")); } public void setHypervisorType(HypervisorType hypervisorType) { diff --git 
a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java index cc33f9eb335..fa8b782f662 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java @@ -297,7 +297,7 @@ public class EngineClusterDaoImpl extends GenericDaoBase .append("; updatedTime=") .append(oldUpdatedTime); } else { - logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore"); + logger.debug("Unable to update dataCenter {} with id={}, as there is no such dataCenter exists in the database anymore", vo, vo.getId()); } } return rows > 0; diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineDataCenterDaoImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineDataCenterDaoImpl.java index 03b4bd9eaaf..96dfdc00d67 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineDataCenterDaoImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineDataCenterDaoImpl.java @@ -300,7 +300,7 @@ public class EngineDataCenterDaoImpl extends GenericDaoBase 0; diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java index 2099ebadb9f..2ad8d15d0b7 100644 --- 
a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java @@ -451,7 +451,7 @@ public class EngineHostDaoImpl extends GenericDaoBase implem .append("; updatedTime=") .append(oldUpdatedTime); } else { - logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore"); + logger.debug("Unable to update dataCenter: {}, as there is no such dataCenter exists in the database anymore", vo); } } return rows > 0; diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostPodDaoImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostPodDaoImpl.java index 535e396a376..58bbfcfc1a1 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostPodDaoImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostPodDaoImpl.java @@ -183,7 +183,7 @@ public class EngineHostPodDaoImpl extends GenericDaoBase .append("; updatedTime=") .append(oldUpdatedTime); } else { - logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore"); + logger.debug("Unable to update dataCenter: {}, as there is no such dataCenter exists in the database anymore", vo); } } return rows > 0; diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java index e1b798d16d6..7efc29b02a6 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java +++ 
b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java @@ -1248,18 +1248,18 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra VlanVO vlanVo = _vlanDao.findByNetworkIdAndIpv4(network.getId(), requestedIpv4Address); if (vlanVo == null) { - throw new InvalidParameterValueException(String.format("Trying to configure a Nic with the requested [IPv4='%s'] but cannot find a Vlan for the [network id='%s']", - requestedIpv4Address, network.getId())); + throw new InvalidParameterValueException(String.format("Trying to configure a Nic with the requested [IPv4='%s'] but cannot find a Vlan for the [network '%s']", + requestedIpv4Address, network)); } String ipv4Gateway = vlanVo.getVlanGateway(); String ipv4Netmask = vlanVo.getVlanNetmask(); if (!NetUtils.isValidIp4(ipv4Gateway)) { - throw new InvalidParameterValueException(String.format("The [IPv4Gateway='%s'] from [VlanId='%s'] is not valid", ipv4Gateway, vlanVo.getId())); + throw new InvalidParameterValueException(String.format("The [IPv4Gateway='%s'] from [Vlan id=%d uuid=%s] is not valid", ipv4Gateway, vlanVo.getId(), vlanVo.getUuid())); } if (!NetUtils.isValidIp4Netmask(ipv4Netmask)) { - throw new InvalidParameterValueException(String.format("The [IPv4Netmask='%s'] from [VlanId='%s'] is not valid", ipv4Netmask, vlanVo.getId())); + throw new InvalidParameterValueException(String.format("The [IPv4Netmask='%s'] from [Vlan id=%d uuid=%s] is not valid", ipv4Netmask, vlanVo.getId(), vlanVo.getUuid())); } acquireLockAndCheckIfIpv4IsFree(network, requestedIpv4Address); @@ -1273,7 +1273,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra String macAddress = _networkModel.getNextAvailableMacAddressInNetwork(network.getId()); nicProfile.setMacAddress(macAddress); } catch (InsufficientAddressCapacityException e) { - throw new CloudRuntimeException(String.format("Cannot get next available mac address in [network 
id='%s']", network.getId()), e); + throw new CloudRuntimeException(String.format("Cannot get next available mac address in [network %s]", network), e); } } } @@ -1285,7 +1285,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra IPAddressVO ipVO = _ipAddressDao.findByIpAndSourceNetworkId(network.getId(), requestedIpv4Address); if (ipVO == null) { throw new InvalidParameterValueException( - String.format("Cannot find IPAddressVO for guest [IPv4 address='%s'] and [network id='%s']", requestedIpv4Address, network.getId())); + String.format("Cannot find IPAddressVO for guest [IPv4 address='%s'] and [network %s]", requestedIpv4Address, network)); } try { IPAddressVO lockedIpVO = _ipAddressDao.acquireInLockTable(ipVO.getId()); @@ -1489,17 +1489,17 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra final SetupPersistentNetworkAnswer answer = (SetupPersistentNetworkAnswer) _agentMgr.send(host.getId(), cmd); if (answer == null) { - logger.warn("Unable to get an answer to the SetupPersistentNetworkCommand from agent: {}", host.getId()); + logger.warn("Unable to get an answer to the SetupPersistentNetworkCommand from agent: {}", host); clusterToHostsMap.get(host.getClusterId()).remove(host.getId()); continue; } if (!answer.getResult()) { - logger.warn("Unable to setup agent {} due to {}", host.getId(), answer.getDetails()); + logger.warn("Unable to setup agent {} due to {}", host, answer.getDetails()); clusterToHostsMap.get(host.getClusterId()).remove(host.getId()); } } catch (Exception e) { - logger.warn("Failed to connect to host: {}", host.getName()); + logger.warn("Failed to connect to host: {}", host); } } if (clusterToHostsMap.keySet().size() != clusterVOs.size()) { @@ -1526,7 +1526,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra NetworkVO network = _networksDao.findById(networkId); final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, 
network.getGuruName()); if (isNetworkImplemented(network)) { - logger.debug("Network id={} is already implemented", networkId); + logger.debug("Network {} is already implemented", network); implemented.set(guru, network); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_UPDATE, network.getAccountId(), network.getDataCenterId(), network.getId(), network.getName(), network.getNetworkOfferingId(), null, network.getState().name(), Network.class.getName(), network.getUuid(), true); @@ -1542,11 +1542,11 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra throw ex; } - logger.debug("Lock is acquired for network id {} as a part of network implement", networkId); + logger.debug("Lock is acquired for network id {} as a part of network implement", network); try { if (isNetworkImplemented(network)) { - logger.debug("Network id={} is already implemented", networkId); + logger.debug("Network {} is already implemented", network); implemented.set(guru, network); return implemented; } @@ -1618,7 +1618,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } _networksDao.releaseFromLockTable(networkId); - logger.debug("Lock is released for network id {} as a part of network implement", networkId); + logger.debug("Lock is released for network {} as a part of network implement", network); } } @@ -1743,57 +1743,57 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra _firewallMgr.applyDefaultEgressFirewallRule(network.getId(), offering.isEgressDefaultPolicy(), true); } if (!_firewallMgr.applyFirewallRules(firewallEgressRulesToApply, false, caller)) { - logger.warn("Failed to reapply firewall Egress rule(s) as a part of network id={} restart", networkId); + logger.warn("Failed to reapply firewall Egress rule(s) as a part of network {} restart", network); success = false; } // associate all ip addresses if (!_ipAddrMgr.applyIpAssociations(network, false)) { - logger.warn("Failed to 
apply ip addresses as a part of network id {} restart", networkId); + logger.warn("Failed to apply ip addresses as a part of network {} restart", network); success = false; } // apply BGP settings if (!bgpService.applyBgpPeers(network, false)) { - logger.warn("Failed to apply bpg peers as a part of network id {} restart", networkId); + logger.warn("Failed to apply bgp peers as a part of network {} restart", network); success = false; } // apply static nat - if (!_rulesMgr.applyStaticNatsForNetwork(networkId, false, caller)) { - logger.warn("Failed to apply static nats a part of network id {} restart", networkId); + if (!_rulesMgr.applyStaticNatsForNetwork(network, false, caller)) { + logger.warn("Failed to apply static nats as a part of network {} restart", network); success = false; } // apply firewall rules final List firewallIngressRulesToApply = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Ingress); if (!_firewallMgr.applyFirewallRules(firewallIngressRulesToApply, false, caller)) { - logger.warn("Failed to reapply Ingress firewall rule(s) as a part of network id={} restart", networkId); + logger.warn("Failed to reapply Ingress firewall rule(s) as a part of network {} restart", network); success = false; } // apply port forwarding rules if (!_rulesMgr.applyPortForwardingRulesForNetwork(networkId, false, caller)) { - logger.warn("Failed to reapply port forwarding rule(s) as a part of network id={} restart", networkId); + logger.warn("Failed to reapply port forwarding rule(s) as a part of network {} restart", network); success = false; } // apply static nat rules if (!_rulesMgr.applyStaticNatRulesForNetwork(networkId, false, caller)) { - logger.warn("Failed to reapply static nat rule(s) as a part of network id={} restart", networkId); + logger.warn("Failed to reapply static nat rule(s) as a part of network {} restart", network); success = false; } // apply public load balancer rules - if 
(!_lbMgr.applyLoadBalancersForNetwork(networkId, Scheme.Public)) { - logger.warn("Failed to reapply Public load balancer rules as a part of network id={} restart", networkId); + if (!_lbMgr.applyLoadBalancersForNetwork(network, Scheme.Public)) { + logger.warn("Failed to reapply Public load balancer rules as a part of network {} restart", network); success = false; } // apply internal load balancer rules - if (!_lbMgr.applyLoadBalancersForNetwork(networkId, Scheme.Internal)) { - logger.warn("Failed to reapply internal load balancer rules as a part of network id={} restart", networkId); + if (!_lbMgr.applyLoadBalancersForNetwork(network, Scheme.Internal)) { + logger.warn("Failed to reapply internal load balancer rules as a part of network {} restart", network); success = false; } @@ -1803,7 +1803,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra for (final RemoteAccessVpn vpn : vpnsToReapply) { // Start remote access vpn per ip if (_vpnMgr.startRemoteAccessVpn(vpn.getServerAddressId(), false) == null) { - logger.warn("Failed to reapply vpn rules as a part of network id={} restart", networkId); + logger.warn("Failed to reapply vpn rules as a part of network {} restart", network); success = false; } } @@ -1811,7 +1811,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra //apply network ACLs if (!_networkACLMgr.applyACLToNetwork(networkId)) { - logger.warn("Failed to reapply network ACLs as a part of of network id={}", networkId); + logger.warn("Failed to reapply network ACLs as a part of network {}", network); success = false; } @@ -1922,13 +1922,13 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra long userId = User.UID_SYSTEM; //remove all PF/Static Nat rules for the network logger.info("Services: {} are no longer supported in network: {} after applying new network offering: {} removing the related configuration", - services, network.getUuid(), 
network.getNetworkOfferingId()); + services::toString, network::toString, () -> _networkOfferingDao.findById(network.getNetworkOfferingId())); if (services.contains(Service.StaticNat.getName()) || services.contains(Service.PortForwarding.getName())) { try { if (_rulesMgr.revokeAllPFStaticNatRulesForNetwork(networkId, userId, caller)) { - logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id={}", networkId); + logger.debug("Successfully cleaned up portForwarding/staticNat rules for network {}", network); } else { - logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup", networkId); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network {} cleanup", network); } if (services.contains(Service.StaticNat.getName())) { //removing static nat configured on ips. @@ -1947,7 +1947,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra }); } } catch (ResourceUnavailableException ex) { - logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network {} cleanup due to resourceUnavailable", network, ex); } } if (services.contains(Service.SourceNat.getName())) { @@ -1966,22 +1966,22 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra if (services.contains(Service.Lb.getName())) { //remove all LB rules for the network if (_lbMgr.removeAllLoadBalanacersForNetwork(networkId, caller, userId)) { - logger.debug("Successfully cleaned up load balancing rules for network id={}", networkId); + logger.debug("Successfully cleaned up load balancing rules for network {}", network); } else { - logger.warn("Failed to cleanup LB rules as a part of network id={} cleanup", networkId); + logger.warn("Failed to cleanup LB rules as a part of network {} cleanup", network); } } if 
(services.contains(Service.Firewall.getName())) { //revoke all firewall rules for the network try { - if (_firewallMgr.revokeAllFirewallRulesForNetwork(networkId, userId, caller)) { - logger.debug("Successfully cleaned up firewallRules rules for network id={}", networkId); + if (_firewallMgr.revokeAllFirewallRulesForNetwork(network, userId, caller)) { + logger.debug("Successfully cleaned up firewallRules rules for network {}", network); } else { - logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup", networkId); + logger.warn("Failed to cleanup Firewall rules as a part of network {} cleanup", network); } } catch (ResourceUnavailableException ex) { - logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex); + logger.warn("Failed to cleanup Firewall rules as a part of network {} cleanup due to resourceUnavailable", network, ex); } } @@ -1991,7 +1991,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra try { _vpnMgr.destroyRemoteAccessVpnForIp(vpn.getServerAddressId(), caller, true); } catch (ResourceUnavailableException ex) { - logger.warn("Failed to cleanup remote access vpn resources of network: {} due to Exception: {}", network.getUuid(), ex); + logger.warn("Failed to cleanup remote access vpn resources of network: {} due to Exception: {}", network, ex); } } } @@ -2088,20 +2088,20 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } @DB - protected void updateNic(final NicVO nic, final long networkId, final int count) { + protected void updateNic(final NicVO nic, final Network network, final int count) { Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) { _nicDao.update(nic.getId(), nic); if (nic.getVmType() == VirtualMachine.Type.User) { - logger.debug("Changing active number of nics for network id={} on {}", 
networkId, count); - _networksDao.changeActiveNicsBy(networkId, count); + logger.debug(String.format("Changing active number of nics for network id=%s on %d", network, count)); + _networksDao.changeActiveNicsBy(network.getId(), count); } if (nic.getVmType() == VirtualMachine.Type.User - || nic.getVmType() == VirtualMachine.Type.DomainRouter && _networksDao.findById(networkId).getTrafficType() == TrafficType.Guest) { - _networksDao.setCheckForGc(networkId); + || nic.getVmType() == VirtualMachine.Type.DomainRouter && _networksDao.findById(network.getId()).getTrafficType() == TrafficType.Guest) { + _networksDao.setCheckForGc(network.getId()); } } }); @@ -2128,8 +2128,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra for (final NicVO nic : nics) { final Pair implemented = implementNetwork(nic.getNetworkId(), dest, context, vmProfile.getVirtualMachine().getType() == Type.DomainRouter); if (implemented == null || implemented.first() == null) { - logger.warn("Failed to implement network id={} as a part of preparing nic id={}", nic.getNetworkId(), nic.getId()); - throw new CloudRuntimeException("Failed to implement network id=" + nic.getNetworkId() + " as a part preparing nic id=" + nic.getId()); + NetworkVO network = _networksDao.findById(nic.getNetworkId()); + logger.warn("Failed to implement network: {} as a part of preparing nic {}", network, nic); + throw new CloudRuntimeException(String.format("Failed to implement network id=%s as a part preparing nic %s", network, nic)); } final NetworkVO network = implemented.second(); @@ -2194,7 +2195,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra Pair networks = getGuestNetworkRouterAndVpcDetails(vmProfile.getId()); setMtuDetailsInVRNic(networks, network, nic); } - updateNic(nic, network.getId(), 1); + updateNic(nic, network, 1); final List providersToImplement = getNetworkProviders(network.getId()); for (final NetworkElement element : networkElements) { 
@@ -2299,7 +2300,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra for (final NetworkElement element : networkElements) { if (providersToImplement.contains(element.getProvider())) { if (!_networkModel.isProviderEnabledInPhysicalNetwork(_networkModel.getPhysicalNetworkId(network), element.getProvider().getName())) { - throw new CloudRuntimeException("Service provider " + element.getProvider().getName() + " either doesn't exist or is not enabled in physical network id: " + network.getPhysicalNetworkId()); + throw new CloudRuntimeException(String.format("Service provider %s either doesn't exist or is not enabled in physical network: %s", element.getProvider().getName(), _physicalNetworkDao.findById(network.getPhysicalNetworkId()))); } if (element instanceof NetworkMigrationResponder) { if (!((NetworkMigrationResponder) element).prepareMigration(profile, network, vm, dest, context)) { @@ -2324,10 +2325,10 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra if (nic == null && !addedURIs.contains(broadcastUri.toString())) { //Nic details are not available in DB //Create nic profile for migration - logger.debug("Creating nic profile for migration. BroadcastUri: {} NetworkId: {} VM: {}", broadcastUri.toString(), ntwkId, vm.getId()); final NetworkVO network = _networksDao.findById(ntwkId); final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, network.getGuruName()); final NicProfile profile = new NicProfile(); + logger.debug("Creating nic profile for migration. 
BroadcastUri: {} NetworkId: {} VM: {}", broadcastUri.toString(), network, vm); profile.setDeviceId(255); //dummyId profile.setIPv4Address(userIp.getAddress().toString()); profile.setIPv4Netmask(publicIp.getNetmask()); @@ -2467,7 +2468,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra applyProfileToNicForRelease(nic, profile); nic.setState(Nic.State.Allocated); if (originalState == Nic.State.Reserved) { - updateNic(nic, network.getId(), -1); + updateNic(nic, network, -1); } else { _nicDao.update(nic.getId(), nic); } @@ -2476,7 +2477,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra return new Pair<>(network, profile); } else { nic.setState(Nic.State.Allocated); - updateNic(nic, network.getId(), -1); + updateNic(nic, network, -1); } } @@ -2513,7 +2514,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra @Override public void cleanupNics(final VirtualMachineProfile vm) { - logger.debug("Cleaning network for vm: {}", vm.getId()); + logger.debug("Cleaning network for vm: {}", vm); final List nics = _nicDao.listByVmId(vm.getId()); for (final NicVO nic : nics) { @@ -2610,7 +2611,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra _nicDao.remove(nic.getId()); } - logger.debug("Removed nic id={}", nic.getId()); + logger.debug("Removed nic {}", nic); // release assigned IPv6 for Isolated Network VR NIC if (Type.User.equals(vm.getType()) && GuestType.Isolated.equals(network.getGuestType()) @@ -2623,7 +2624,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra //remove the secondary ip addresses corresponding to this nic if (!removeVmSecondaryIpsOfNic(nic.getId())) { - logger.debug("Removing nic {} secondary ip addresses failed", nic.getId()); + logger.debug("Removing nic {} secondary ip addresses failed", nic); } } @@ -2837,16 +2838,21 @@ public class NetworkOrchestrator extends ManagerBase implements 
NetworkOrchestra } if (secondaryUri != null && !(bypassVlanOverlapCheck && ntwkOff.getGuestType() == GuestType.Shared) && _dcDao.findVnet(zoneId, pNtwk.getId(), BroadcastDomainType.getValue(secondaryUri)).size() > 0) { - throw new InvalidParameterValueException("The VLAN tag for isolated PVLAN " + isolatedPvlan + " is already being used for dynamic vlan allocation for the guest network in zone " - + zone.getName()); + throw new InvalidParameterValueException(String.format( + "The VLAN tag for isolated PVLAN %s is already being used for dynamic vlan allocation for the guest network in zone %s", + isolatedPvlan, zone)); } if (!UuidUtils.isUuid(vlanId)) { // For Isolated and L2 networks, don't allow to create network with vlan that already exists in the zone if (!hasGuestBypassVlanOverlapCheck(bypassVlanOverlapCheck, ntwkOff, isPrivateNetwork)) { if (_networksDao.listByZoneAndUriAndGuestType(zoneId, uri.toString(), null).size() > 0) { - throw new InvalidParameterValueException("Network with vlan " + vlanId + " already exists or overlaps with other network vlans in zone " + zoneId); + throw new InvalidParameterValueException(String.format( + "Network with vlan %s already exists or overlaps with other network vlans in zone %s", + vlanId, zone)); } else if (secondaryUri != null && _networksDao.listByZoneAndUriAndGuestType(zoneId, secondaryUri.toString(), null).size() > 0) { - throw new InvalidParameterValueException("Network with vlan " + isolatedPvlan + " already exists or overlaps with other network vlans in zone " + zoneId); + throw new InvalidParameterValueException(String.format( + "Network with vlan %s already exists or overlaps with other network vlans in zone %s", + isolatedPvlan, zone)); } else { final List dcVnets = _datacenterVnetDao.findVnet(zoneId, BroadcastDomainType.getValue(uri)); //for the network that is created as part of private gateway, @@ -2878,7 +2884,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra // don't 
allow to creating shared network with given Vlan ID, if there already exists a isolated network or // shared network with same Vlan ID in the zone if (!bypassVlanOverlapCheck && _networksDao.listByZoneAndUriAndGuestType(zoneId, uri.toString(), GuestType.Isolated).size() > 0) { - throw new InvalidParameterValueException("There is an existing isolated/shared network that overlaps with vlan id:" + vlanId + " in zone " + zoneId); + throw new InvalidParameterValueException(String.format( + "There is an existing isolated/shared network that overlaps with vlan id:%s in zone %s", vlanId, zone)); } } } @@ -2893,7 +2900,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra if (isUpdateDnsSupported == null || !Boolean.valueOf(isUpdateDnsSupported)) { if (networkDomain != null) { // TBD: NetworkOfferingId and zoneId. Send uuids instead. - throw new InvalidParameterValueException("Domain name change is not supported by network offering id=" + networkOfferingId + " in zone id=" + zoneId); + throw new InvalidParameterValueException(String.format( + "Domain name change is not supported by network offering id=%d in zone %s", + networkOfferingId, zone)); } } else { if (networkDomain == null) { @@ -3028,8 +3037,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } if (_networksDao.listByPhysicalNetworkPvlan(physicalNetworkId, uri.toString()).size() > 0) { - throw new InvalidParameterValueException("Network with vlan " + vlanIdFinal + - " already exists or overlaps with other network pvlans in zone " + zoneId); + throw new InvalidParameterValueException(String.format( + "Network with vlan %s already exists or overlaps with other network pvlans in zone %s", + vlanIdFinal, zone)); } userNetwork.setBroadcastUri(uri); @@ -3044,9 +3054,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } URI uri = NetUtils.generateUriForPvlan(vlanIdFinal, isolatedPvlan, isolatedPvlanType.toString()); if 
(_networksDao.listByPhysicalNetworkPvlan(physicalNetworkId, uri.toString(), isolatedPvlanType).size() > 0) { - throw new InvalidParameterValueException("Network with primary vlan " + vlanIdFinal + - " and secondary vlan " + isolatedPvlan + " type " + isolatedPvlanType + - " already exists or overlaps with other network pvlans in zone " + zoneId); + throw new InvalidParameterValueException(String.format( + "Network with primary vlan %s and secondary vlan %s type %s already exists or overlaps with other network pvlans in zone %s", + vlanIdFinal, isolatedPvlan, isolatedPvlanType, zone)); } userNetwork.setBroadcastUri(uri); userNetwork.setBroadcastDomainType(BroadcastDomainType.Pvlan); @@ -3189,7 +3199,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra boolean result = false; if (success) { - logger.debug("Network id={} is shutdown successfully, cleaning up corresponding resources now.", networkId); + logger.debug("Network {} is shutdown successfully, cleaning up corresponding resources now.", networkFinal); final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, networkFinal.getGuruName()); final NetworkProfile profile = convertNetworkToNetworkProfile(networkFinal.getId()); guru.shutdown(profile, _networkOfferingDao.findById(networkFinal.getNetworkOfferingId())); @@ -3250,14 +3260,14 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } } if (cleanupNeeded) { - cleanupResult = shutdownNetworkResources(network.getId(), context.getAccount(), context.getCaller().getId()); + cleanupResult = shutdownNetworkResources(network, context.getAccount(), context.getCaller().getId()); } } catch (final Exception ex) { logger.warn("shutdownNetworkRules failed during the network {} shutdown due to", network, ex); } finally { // just warn the administrator that the network elements failed to shutdown if (!cleanupResult) { - logger.warn("Failed to cleanup network id={} resources as a part of shutdownNetwork", 
network.getId()); + logger.warn("Failed to cleanup network {} resources as a part of shutdownNetwork", network); } } @@ -3299,15 +3309,15 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra CleanupPersistentNetworkResourceCommand cmd = new CleanupPersistentNetworkResourceCommand(to); CleanupPersistentNetworkResourceAnswer answer = (CleanupPersistentNetworkResourceAnswer) _agentMgr.send(host.getId(), cmd); if (answer == null) { - logger.warn("Unable to get an answer to the CleanupPersistentNetworkResourceCommand from agent: {}", host.getId()); + logger.warn("Unable to get an answer to the CleanupPersistentNetworkResourceCommand from agent: {}", host); continue; } if (!answer.getResult()) { - logger.warn("Unable to setup agent {} due to {}", host.getId(), answer.getDetails()); + logger.warn("Unable to setup agent {} due to {}", host, answer.getDetails()); } } catch (Exception e) { - logger.warn("Failed to cleanup network resources on host: {}", host.getName()); + logger.warn("Failed to cleanup network resources on host: {}", host); } } } @@ -3337,7 +3347,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra // Don't allow to delete network via api call when it has vms assigned to it final int nicCount = getActiveNicsInNetwork(networkId); if (nicCount > 0) { - logger.debug("The network id={} has active Nics, but shouldn't.", networkId); + logger.debug("The network {} has active Nics, but shouldn't.", network); // at this point we have already determined that there are no active user vms in network // if the op_networks table shows active nics, it's a bug in releasing nics updating op_networks _networksDao.changeActiveNicsBy(networkId, -1 * nicCount); @@ -3367,7 +3377,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra boolean success = true; if (!cleanupNetworkResources(networkId, callerAccount, context.getCaller().getId())) { - logger.warn("Unable to delete network 
id={}: failed to cleanup network resources", networkId); + logger.warn("Unable to delete network {}: failed to cleanup network resources", network); return false; } @@ -3396,7 +3406,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } if (success) { - logger.debug("Network id={} is destroyed successfully, cleaning up corresponding resources now.", networkId); + logger.debug("Network {} is destroyed successfully, cleaning up corresponding resources now.", network); final NetworkVO networkFinal = network; try { @@ -3495,7 +3505,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra for (final VlanVO vlan : publicVlans) { VlanVO vlanRange = _configMgr.deleteVlanAndPublicIpRange(userId, vlan.getId(), callerAccount); if (vlanRange == null) { - logger.warn("Failed to delete vlan " + vlan.getId() + ");"); + logger.warn("Failed to delete vlan [id: {}, uuid: {}];", vlan.getId(), vlan.getUuid()); result = false; } else { deletedPublicVlanRange.add(vlanRange); @@ -3505,16 +3515,16 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra //cleanup private vlans final int privateIpAllocCount = _privateIpDao.countAllocatedByNetworkId(networkId); if (privateIpAllocCount > 0) { - logger.warn("Can't delete Private ip range for network {} as it has allocated ip addresses", networkId); + logger.warn("Can't delete Private ip range for network {} as it has allocated ip addresses", network); result = false; } else { _privateIpDao.deleteByNetworkId(networkId); - logger.debug("Deleted ip range for private network id={}", networkId); + logger.debug("Deleted ip range for private network {}", network); } // release vlans of user-shared networks without specifyvlan if (isSharedNetworkWithoutSpecifyVlan(_networkOfferingDao.findById(network.getNetworkOfferingId()))) { - logger.debug("Releasing vnet for the network id={}", network.getId()); + logger.debug("Releasing vnet for the network {}", network); 
_dcDao.releaseVnet(BroadcastDomainType.getValue(network.getBroadcastUri()), network.getDataCenterId(), network.getPhysicalNetworkId(), network.getAccountId(), network.getReservationId()); } @@ -3560,10 +3570,10 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra final Long time = _lastNetworkIdsToFree.remove(networkId); if (time == null) { - logger.debug("We found network {} to be free for the first time. Adding it to the list: {}", networkId, currentTime); + logger.debug("We found network {} to be free for the first time. Adding it to the list: {}", () -> _networksDao.findById(networkId), () -> currentTime); stillFree.put(networkId, currentTime); } else if (time > currentTime - netGcWait) { - logger.debug("Network {} is still free but it's not time to shutdown yet: {}",networkId, time); + logger.debug("Network {} is still free but it's not time to shutdown yet: {}", () -> _networksDao.findById(networkId), time::toString); stillFree.put(networkId, time); } else { shutdownList.add(networkId); @@ -3590,7 +3600,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra shutdownNetwork(networkId, context, false); } catch (final Exception e) { - logger.warn("Unable to shutdown network: {}", networkId); + logger.warn("Unable to shutdown network: {}", () -> _networksDao.findById(networkId)); } } } @@ -3630,7 +3640,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra boolean restartRequired = false; final NetworkVO network = _networksDao.findById(networkId); - logger.debug("Restarting network {}...", networkId); + logger.debug("Restarting network {}...", network); final ReservationContext context = new ReservationContextImpl(null, null, callerUser, callerAccount); final NetworkOffering offering = _networkOfferingDao.findByIdIncludingRemoved(network.getNetworkOfferingId()); @@ -3985,51 +3995,51 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra 
//remove all PF/Static Nat rules for the network try { if (_rulesMgr.revokeAllPFStaticNatRulesForNetwork(networkId, callerUserId, caller)) { - logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id={}", networkId); + logger.debug("Successfully cleaned up portForwarding/staticNat rules for network {}", network); } else { success = false; - logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup", networkId); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network {} cleanup", network); } } catch (final ResourceUnavailableException ex) { success = false; // shouldn't even come here as network is being cleaned up after all network elements are shutdown - logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network {} cleanup due to resourceUnavailable", network, ex); } //remove all LB rules for the network if (_lbMgr.removeAllLoadBalanacersForNetwork(networkId, caller, callerUserId)) { - logger.debug("Successfully cleaned up load balancing rules for network id={}", networkId); + logger.debug("Successfully cleaned up load balancing rules for network {}", network); } else { // shouldn't even come here as network is being cleaned up after all network elements are shutdown success = false; - logger.warn("Failed to cleanup LB rules as a part of network id={} cleanup", networkId); + logger.warn("Failed to cleanup LB rules as a part of network {} cleanup", network); } //revoke all firewall rules for the network try { - if (_firewallMgr.revokeAllFirewallRulesForNetwork(networkId, callerUserId, caller)) { - logger.debug("Successfully cleaned up firewallRules rules for network id={}", networkId); + if (_firewallMgr.revokeAllFirewallRulesForNetwork(network, callerUserId, caller)) { + logger.debug("Successfully cleaned 
up firewallRules rules for network {}", network); } else { success = false; - logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup", networkId); + logger.warn("Failed to cleanup Firewall rules as a part of network {} cleanup", network); } } catch (final ResourceUnavailableException ex) { success = false; // shouldn't even come here as network is being cleaned up after all network elements are shutdown - logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex); + logger.warn("Failed to cleanup Firewall rules as a part of network {} cleanup due to resourceUnavailable", network, ex); } //revoke all network ACLs for network try { if (_networkACLMgr.revokeACLItemsForNetwork(networkId)) { - logger.debug("Successfully cleaned up NetworkACLs for network id={}", networkId); + logger.debug("Successfully cleaned up NetworkACLs for network {}", network); } else { success = false; - logger.warn("Failed to cleanup NetworkACLs as a part of network id={} cleanup", networkId); + logger.warn("Failed to cleanup NetworkACLs as a part of network {} cleanup", network); } } catch (final ResourceUnavailableException ex) { success = false; - logger.warn("Failed to cleanup Network ACLs as a part of network id={} cleanup due to resourceUnavailable ", networkId, ex); + logger.warn("Failed to cleanup Network ACLs as a part of network {} cleanup due to resourceUnavailable ", network, ex); } //release all ip addresses @@ -4047,7 +4057,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra logger.debug("Portable IP address {} is no longer associated with any network", ipToRelease); } } else { - _vpcMgr.unassignIPFromVpcNetwork(ipToRelease.getId(), network.getId()); + _vpcMgr.unassignIPFromVpcNetwork(ipToRelease, network); } } @@ -4065,14 +4075,13 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra return success; } - private boolean 
shutdownNetworkResources(final long networkId, final Account caller, final long callerUserId) { + private boolean shutdownNetworkResources(final Network network, final Account caller, final long callerUserId) { // This method cleans up network rules on the backend w/o touching them in the DB boolean success = true; - final Network network = _networksDao.findById(networkId); // Mark all PF rules as revoked and apply them on the backend (not in the DB) - final List pfRules = _portForwardingRulesDao.listByNetwork(networkId); - logger.debug("Releasing {} port forwarding rules for network id={} as a part of shutdownNetworkRules.", pfRules.size(), networkId); + final List pfRules = _portForwardingRulesDao.listByNetwork(network.getId()); + logger.debug("Releasing {} port forwarding rules for network id={} as a part of shutdownNetworkRules.", pfRules.size(), network); for (final PortForwardingRuleVO pfRule : pfRules) { logger.trace("Marking pf rule {} with Revoke state", pfRule); @@ -4090,9 +4099,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } // Mark all static rules as revoked and apply them on the backend (not in the DB) - final List firewallStaticNatRules = _firewallDao.listByNetworkAndPurpose(networkId, Purpose.StaticNat); + final List firewallStaticNatRules = _firewallDao.listByNetworkAndPurpose(network.getId(), Purpose.StaticNat); final List staticNatRules = new ArrayList(); - logger.debug("Releasing {} static nat rules for network id={} as a part of shutdownNetworkRules", firewallStaticNatRules.size(), networkId); + logger.debug("Releasing {} static nat rules for network {} as a part of shutdownNetworkRules", firewallStaticNatRules.size(), network); for (final FirewallRuleVO firewallStaticNatRule : firewallStaticNatRules) { logger.trace("Marking static nat rule {} with Revoke state", firewallStaticNatRule); @@ -4100,7 +4109,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra final 
FirewallRuleVO ruleVO = _firewallDao.findById(firewallStaticNatRule.getId()); if (ip == null || !ip.isOneToOneNat() || ip.getAssociatedWithVmId() == null) { - throw new InvalidParameterValueException("Source ip address of the rule id=" + firewallStaticNatRule.getId() + " is not static nat enabled"); + throw new InvalidParameterValueException(String.format("Source ip address of the rule %s is not static nat enabled", firewallStaticNatRule)); } //String dstIp = _networkModel.getIpInNetwork(ip.getAssociatedWithVmId(), firewallStaticNatRule.getNetworkId()); @@ -4119,7 +4128,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } try { - if (!_lbMgr.revokeLoadBalancersForNetwork(networkId, Scheme.Public)) { + if (!_lbMgr.revokeLoadBalancersForNetwork(network, Scheme.Public)) { logger.warn("Failed to cleanup public lb rules as a part of shutdownNetworkRules"); success = false; } @@ -4129,7 +4138,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } try { - if (!_lbMgr.revokeLoadBalancersForNetwork(networkId, Scheme.Internal)) { + if (!_lbMgr.revokeLoadBalancersForNetwork(network, Scheme.Internal)) { logger.warn("Failed to cleanup internal lb rules as a part of shutdownNetworkRules"); success = false; } @@ -4139,8 +4148,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } // revoke all firewall rules for the network w/o applying them on the DB - final List firewallRules = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Ingress); - logger.debug("Releasing firewall ingress rules for network id={} as a part of shutdownNetworkRules", firewallRules.size(), networkId); + final List firewallRules = _firewallDao.listByNetworkPurposeTrafficType(network.getId(), Purpose.Firewall, FirewallRule.TrafficType.Ingress); + logger.debug("Releasing firewall ingress rules for network {} as a part of shutdownNetworkRules", 
firewallRules.size(), network); for (final FirewallRuleVO firewallRule : firewallRules) { logger.trace("Marking firewall ingress rule {} with Revoke state", firewallRule); @@ -4157,8 +4166,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra success = false; } - final List firewallEgressRules = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Egress); - logger.debug("Releasing {} firewall egress rules for network id={} as a part of shutdownNetworkRules", firewallEgressRules.size(), networkId); + final List firewallEgressRules = _firewallDao.listByNetworkPurposeTrafficType(network.getId(), Purpose.Firewall, FirewallRule.TrafficType.Egress); + logger.debug("Releasing {} firewall egress rules for network {} as a part of shutdownNetworkRules", firewallEgressRules.size(), network); try { // delete default egress rule @@ -4166,7 +4175,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra if (_networkModel.areServicesSupportedInNetwork(network.getId(), Service.Firewall) && (network.getGuestType() == Network.GuestType.Isolated || network.getGuestType() == Network.GuestType.Shared && zone.getNetworkType() == NetworkType.Advanced)) { // add default egress rule to accept the traffic - _firewallMgr.applyDefaultEgressFirewallRule(network.getId(), _networkModel.getNetworkEgressDefaultPolicy(networkId), false); + _firewallMgr.applyDefaultEgressFirewallRule(network.getId(), _networkModel.getNetworkEgressDefaultPolicy(network.getId()), false); } } catch (final ResourceUnavailableException ex) { @@ -4190,11 +4199,11 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } if (network.getVpcId() != null) { - logger.debug("Releasing Network ACL Items for network id={} as a part of shutdownNetworkRules", networkId); + logger.debug("Releasing Network ACL Items for network {} as a part of shutdownNetworkRules", network); try { //revoke all 
Network ACLs for the network w/o applying them in the DB - if (!_networkACLMgr.revokeACLItemsForNetwork(networkId)) { + if (!_networkACLMgr.revokeACLItemsForNetwork(network.getId())) { logger.warn("Failed to cleanup network ACLs as a part of shutdownNetworkRules"); success = false; } @@ -4206,13 +4215,13 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } //release all static nats for the network - if (!_rulesMgr.applyStaticNatForNetwork(networkId, false, caller, true)) { - logger.warn("Failed to disable static nats as part of shutdownNetworkRules for network id {}", networkId); + if (!_rulesMgr.applyStaticNatForNetwork(network, false, caller, true)) { + logger.warn("Failed to disable static nats as part of shutdownNetworkRules for network {}", network); success = false; } // Get all ip addresses, mark as releasing and release them on the backend - final List userIps = _ipAddressDao.listByAssociatedNetwork(networkId, null); + final List userIps = _ipAddressDao.listByAssociatedNetwork(network.getId(), null); final List publicIpsToRelease = new ArrayList(); if (userIps != null && !userIps.isEmpty()) { for (final IPAddressVO userIp : userIps) { @@ -4310,12 +4319,12 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra final CheckNetworkAnswer answer = (CheckNetworkAnswer) _agentMgr.easySend(hostId, nwCmd); if (answer == null) { - logger.warn("Unable to get an answer to the CheckNetworkCommand from agent: {}", host.getId()); - throw new ConnectionException(true, "Unable to get an answer to the CheckNetworkCommand from agent: " + host.getId()); + logger.warn("Unable to get an answer to the CheckNetworkCommand from agent: {}", host); + throw new ConnectionException(true, String.format("Unable to get an answer to the CheckNetworkCommand from agent: %s", host)); } if (!answer.getResult()) { - logger.warn("Unable to setup agent {} due to {}", hostId, answer.getDetails()); + logger.warn("Unable to setup agent {} 
due to {}", host, answer.getDetails()); final String msg = "Incorrect Network setup on agent, Reinitialize agent after network names are setup, details : " + answer.getDetails(); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, dcId, host.getPodId(), msg, msg); throw new ConnectionException(true, msg); @@ -4471,8 +4480,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra if (prepare) { final Pair implemented = implementNetwork(nic.getNetworkId(), dest, context, vmProfile.getVirtualMachine().getType() == Type.DomainRouter); if (implemented == null || implemented.first() == null) { - logger.warn("Failed to implement network id={} as a part of preparing nic id={}", nic.getNetworkId(), nic.getId()); - throw new CloudRuntimeException("Failed to implement network id=" + nic.getNetworkId() + " as a part preparing nic id=" + nic.getId()); + logger.warn("Failed to implement network {} as a part of preparing nic {}", network, nic); + throw new CloudRuntimeException(String.format("Failed to implement network %s as a part preparing nic %s", network, nic)); } nic = prepareNic(vmProfile, dest, context, nic.getId(), implemented.second()); logger.debug("Nic is prepared successfully for vm {} in network {}", vm, network); @@ -4588,18 +4597,18 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra final List providers = getProvidersForServiceInNetwork(network, service); //Only support one provider now if (providers == null) { - logger.error("Cannot find {} provider for network {}", service.getName(), network.getId()); + logger.error("Cannot find {} provider for network {}", service.getName(), network); return null; } if (providers.size() != 1 && service != Service.Lb) { //support more than one LB providers only - logger.error("Found {} {} providers for network! {}", providers.size(), service.getName(), network.getId()); + logger.error("Found {} {} providers for network! 
{}", providers.size(), service.getName(), network); return null; } for (final Provider provider : providers) { final NetworkElement element = _networkModel.getElementImplementingProvider(provider.getName()); - logger.info("Let {} handle {} in network {}", element.getName(), service.getName(), network.getId()); + logger.info("Let {} handle {} in network {}", element.getName(), service.getName(), network); elements.add(element); } return elements; @@ -4693,7 +4702,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra @Override public Pair importNic(final String macAddress, int deviceId, final Network network, final Boolean isDefaultNic, final VirtualMachine vm, final Network.IpAddresses ipAddresses, final DataCenter dataCenter, final boolean forced) throws ConcurrentOperationException, InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException { - logger.debug("Allocating nic for vm {} in network {} during import", vm.getUuid(), network); + logger.debug("Allocating nic for vm {} in network {} during import", vm, network); String selectedIp = null; if (ipAddresses != null && StringUtils.isNotEmpty(ipAddresses.getIp4Address())) { if (ipAddresses.getIp4Address().equals("auto")) { @@ -4743,7 +4752,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra int count = 1; if (vo.getVmType() == VirtualMachine.Type.User) { - logger.debug("Changing active number of nics for network id={} on {}", network.getUuid(), count); + logger.debug("Changing active number of nics for network {} on {}", network, count); _networksDao.changeActiveNicsBy(network.getId(), count); } if (vo.getVmType() == VirtualMachine.Type.User @@ -4807,16 +4816,16 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra private String generateNewMacAddressIfForced(Network network, String macAddress, boolean forced) { if (!forced) { - throw new CloudRuntimeException("NIC with MAC address " + macAddress 
+ " exists on network with ID " + network.getUuid() + + throw new CloudRuntimeException("NIC with MAC address " + macAddress + " exists on network " + network + " and forced flag is disabled"); } try { - logger.debug("Generating a new mac address on network {} as the mac address {} already exists", network.getName(), macAddress); + logger.debug("Generating a new mac address on network {} as the mac address {} already exists", network, macAddress); String newMacAddress = _networkModel.getNextAvailableMacAddressInNetwork(network.getId()); logger.debug("Successfully generated the mac address {}, using it instead of the conflicting address {}", newMacAddress, macAddress); return newMacAddress; } catch (InsufficientAddressCapacityException e) { - String msg = String.format("Could not generate a new mac address on network %s", network.getName()); + String msg = String.format("Could not generate a new mac address on network %s", network); logger.error(msg); throw new CloudRuntimeException(msg); } @@ -4824,7 +4833,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra @Override public void unmanageNics(VirtualMachineProfile vm) { - logger.debug("Unmanaging NICs for VM: {}", vm.getId()); + logger.debug("Unmanaging NICs for VM: {}", vm); VirtualMachine virtualMachine = vm.getVirtualMachine(); final List nics = _nicDao.listByVmId(vm.getId()); diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index ec5d5efb5cf..0773c20b6b9 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -151,7 +151,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra files = 
migrationHelper.getSortedValidSourcesList(srcDatastore, snapshotChains, childTemplates); if (files.isEmpty()) { - return new MigrationResponse(String.format("No files in Image store: %s to migrate", srcDatastore.getId()), migrationPolicy.toString(), true); + return new MigrationResponse(String.format("No files in Image store: %s to migrate", srcDatastore), migrationPolicy.toString(), true); } Map> storageCapacities = new Hashtable<>(); for (Long storeId : destDatastores) { @@ -159,7 +159,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra } storageCapacities.put(srcDataStoreId, new Pair<>(null, null)); if (migrationPolicy == MigrationPolicy.COMPLETE) { - logger.debug("Setting source image store: {} to read-only", srcDatastore.getId()); + logger.debug("Setting source image store: {} to read-only", srcDatastore); storageService.updateImageStoreStatus(srcDataStoreId, true); } @@ -309,8 +309,9 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra message += "Image stores have been attempted to be balanced"; success = true; } else { - message = "Files not completely migrated from "+ srcDatastore.getId() + ". Datastore (source): " + srcDatastore.getId() + "has equal or more free space than destination."+ - " If you want to continue using the Image Store, please change the read-only status using 'update imagestore' command"; + message = String.format("Files not completely migrated from %s. Source datastore " + + "has equal or more free space than destination. 
If you want to continue using the Image Store, " + + "please change the read-only status using 'update imagestore' command", srcDatastore); success = false; } } else { @@ -353,7 +354,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra task.setTemplateChain(templateChains); } futures.add((executor.submit(task))); - logger.debug(String.format("Migration of {}: {} is initiated.", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid())); + logger.debug("Migration of {}: {} is initiated.", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid()); return storageCapacities; } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index 36e28145949..06061908888 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -885,7 +885,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati Account owner, long deviceId, String configurationId) { assert (template.getFormat() != ImageFormat.ISO) : "ISO is not a template."; - Long size = _tmpltMgr.getTemplateSize(template.getId(), vm.getDataCenterId()); + Long size = _tmpltMgr.getTemplateSize(template, vm.getDataCenterId()); if (rootDisksize != null) { if (template.isDeployAsIs()) { // Volume size specified from template deploy-as-is @@ -994,7 +994,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati if (configurationDetail != null) { configurationId = configurationDetail.getValue(); } - templateAsIsDisks = _tmpltMgr.getTemplateDisksOnImageStore(template.getId(), DataStoreRole.Image, configurationId); + templateAsIsDisks = _tmpltMgr.getTemplateDisksOnImageStore(template, 
DataStoreRole.Image, configurationId); if (CollectionUtils.isNotEmpty(templateAsIsDisks)) { templateAsIsDisks = templateAsIsDisks.stream() .filter(x -> !x.isIso()) diff --git a/engine/orchestration/src/test/java/com/cloud/agent/manager/AgentManagerImplTest.java b/engine/orchestration/src/test/java/com/cloud/agent/manager/AgentManagerImplTest.java index 376e189d875..52b7ed77533 100644 --- a/engine/orchestration/src/test/java/com/cloud/agent/manager/AgentManagerImplTest.java +++ b/engine/orchestration/src/test/java/com/cloud/agent/manager/AgentManagerImplTest.java @@ -47,7 +47,7 @@ public class AgentManagerImplTest { host = new HostVO("some-Uuid"); host.setDataCenterId(1L); cmds = new StartupCommand[]{new StartupRoutingCommand()}; - attache = new ConnectedAgentAttache(null, 1L, "kvm-attache", null, false); + attache = new ConnectedAgentAttache(null, 1L, "uuid", "kvm-attache", null, false); hostDao = Mockito.mock(HostDao.class); storagePoolMonitor = Mockito.mock(Listener.class); @@ -83,4 +83,24 @@ public class AgentManagerImplTest { } Mockito.verify(mgr, Mockito.times(1)).handleDisconnectWithoutInvestigation(Mockito.any(attache.getClass()), Mockito.eq(Status.Event.AgentDisconnected), Mockito.eq(true), Mockito.eq(true)); } + + @Test + public void testGetTimeoutWithPositiveTimeout() { + Commands commands = Mockito.mock(Commands.class); + int timeout = 30; + int result = mgr.getTimeout(commands, timeout); + + Assert.assertEquals(30, result); + } + + @Test + public void testGetTimeoutWithGranularTimeout() { + Commands commands = Mockito.mock(Commands.class); + Mockito.doReturn(50).when(mgr).getTimeoutFromGranularWaitTime(commands); + + int timeout = 0; + int result = mgr.getTimeout(commands, timeout); + + Assert.assertEquals(50, result); + } } diff --git a/engine/orchestration/src/test/java/com/cloud/agent/manager/ConnectedAgentAttacheTest.java b/engine/orchestration/src/test/java/com/cloud/agent/manager/ConnectedAgentAttacheTest.java index 3fa6d8d9729..0b42b505668 100644 
--- a/engine/orchestration/src/test/java/com/cloud/agent/manager/ConnectedAgentAttacheTest.java +++ b/engine/orchestration/src/test/java/com/cloud/agent/manager/ConnectedAgentAttacheTest.java @@ -31,8 +31,8 @@ public class ConnectedAgentAttacheTest { Link link = mock(Link.class); - ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, null, link, false); - ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 0, null, link, false); + ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, "uuid", null, link, false); + ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 0, "uuid", null, link, false); assertTrue(agentAttache1.equals(agentAttache2)); } @@ -42,7 +42,7 @@ public class ConnectedAgentAttacheTest { Link link = mock(Link.class); - ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, null, link, false); + ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, "uuid", null, link, false); assertFalse(agentAttache1.equals(null)); } @@ -53,8 +53,8 @@ public class ConnectedAgentAttacheTest { Link link1 = mock(Link.class); Link link2 = mock(Link.class); - ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, null, link1, false); - ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 0, null, link2, false); + ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, "uuid", null, link1, false); + ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 0, "uuid", null, link2, false); assertFalse(agentAttache1.equals(agentAttache2)); } @@ -64,8 +64,8 @@ public class ConnectedAgentAttacheTest { Link link1 = mock(Link.class); - ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 1, null, link1, false); - ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 2, null, link1, false); + ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 1, "uuid", 
null, link1, false); + ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 2, "uuid", null, link1, false); assertFalse(agentAttache1.equals(agentAttache2)); } @@ -75,7 +75,7 @@ public class ConnectedAgentAttacheTest { Link link1 = mock(Link.class); - ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 1, null, link1, false); + ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 1, "uuid", null, link1, false); assertFalse(agentAttache1.equals("abc")); } diff --git a/engine/orchestration/src/test/java/com/cloud/agent/manager/DirectAgentAttacheTest.java b/engine/orchestration/src/test/java/com/cloud/agent/manager/DirectAgentAttacheTest.java index fe9b7fafa81..65e31c271a4 100644 --- a/engine/orchestration/src/test/java/com/cloud/agent/manager/DirectAgentAttacheTest.java +++ b/engine/orchestration/src/test/java/com/cloud/agent/manager/DirectAgentAttacheTest.java @@ -26,6 +26,8 @@ import org.mockito.junit.MockitoJUnitRunner; import com.cloud.resource.ServerResource; +import java.util.UUID; + @RunWith(MockitoJUnitRunner.class) public class DirectAgentAttacheTest { @Mock @@ -36,9 +38,11 @@ public class DirectAgentAttacheTest { long _id = 0L; + String _uuid = UUID.randomUUID().toString(); + @Before public void setup() { - directAgentAttache = new DirectAgentAttache(_agentMgr, _id, "myDirectAgentAttache", _resource, false); + directAgentAttache = new DirectAgentAttache(_agentMgr, _id, _uuid, "myDirectAgentAttache", _resource, false); MockitoAnnotations.initMocks(directAgentAttache); } diff --git a/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java b/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java index d9971815f5e..fdda38fbc39 100644 --- a/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java +++ b/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java @@ -31,6 +31,7 @@ import javax.persistence.Table; import com.cloud.org.Grouping; import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.GenericDao; 
+import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "host_pod_ref") @@ -197,4 +198,11 @@ public class HostPodVO implements Pod { public void setUuid(String uuid) { this.uuid = uuid; } + + @Override + public String toString() { + return String.format("HostPod %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } } diff --git a/engine/schema/src/main/java/com/cloud/dc/VlanVO.java b/engine/schema/src/main/java/com/cloud/dc/VlanVO.java index 7423ded598f..c271325f3de 100644 --- a/engine/schema/src/main/java/com/cloud/dc/VlanVO.java +++ b/engine/schema/src/main/java/com/cloud/dc/VlanVO.java @@ -29,6 +29,7 @@ import javax.persistence.Id; import javax.persistence.Table; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "vlan") @@ -192,24 +193,11 @@ public class VlanVO implements Vlan { @Override public String toString() { if (toString == null) { - toString = - new StringBuilder("Vlan[").append(vlanTag) - .append("|") - .append(vlanGateway) - .append("|") - .append(vlanNetmask) - .append("|") - .append(ip6Gateway) - .append("|") - .append(ip6Cidr) - .append("|") - .append(ipRange) - .append("|") - .append(ip6Range) - .append("|") - .append(networkId) - .append("]") - .toString(); + toString = String.format("Vlan %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", + "vlanTag", "vlanGateway", "vlanNetmask", "ip6Gateway", "ip6Cidr", + "ipRange", "ip6Range", "networkId")); + } return toString; } diff --git a/engine/schema/src/main/java/com/cloud/dc/VmwareDatacenterVO.java b/engine/schema/src/main/java/com/cloud/dc/VmwareDatacenterVO.java index 6390d923ed8..5a4a71f82e7 100644 --- a/engine/schema/src/main/java/com/cloud/dc/VmwareDatacenterVO.java +++ b/engine/schema/src/main/java/com/cloud/dc/VmwareDatacenterVO.java @@ -28,6 
+28,7 @@ import javax.persistence.Table; import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.Encrypt; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; /** * VmwareDatacenterVO contains information of Vmware Datacenter associated with a CloudStack zone. @@ -125,7 +126,9 @@ public class VmwareDatacenterVO implements VmwareDatacenter { @Override public String toString() { - return new StringBuilder("VmwareDatacenter[").append(guid).append("]").toString(); + return String.format("VmwareDatacenter %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "guid")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/domain/DomainVO.java b/engine/schema/src/main/java/com/cloud/domain/DomainVO.java index 4c36a3401ca..c950fa31c88 100644 --- a/engine/schema/src/main/java/com/cloud/domain/DomainVO.java +++ b/engine/schema/src/main/java/com/cloud/domain/DomainVO.java @@ -26,6 +26,7 @@ import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.Table; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -206,7 +207,9 @@ public class DomainVO implements Domain { @Override public String toString() { - return new StringBuilder("Domain:").append(id).append(path).toString(); + return String.format("Domain %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "path")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/host/HostVO.java b/engine/schema/src/main/java/com/cloud/host/HostVO.java index b5b634a73a7..a449eb450cf 100644 --- a/engine/schema/src/main/java/com/cloud/host/HostVO.java +++ b/engine/schema/src/main/java/com/cloud/host/HostVO.java @@ -712,7 +712,7 @@ public class HostVO implements Host { @Override public String toString() { - return 
String.format("Host %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "name", "uuid", "type")); + return String.format("Host %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "name", "type")); } public void setHypervisorType(HypervisorType hypervisorType) { diff --git a/engine/schema/src/main/java/com/cloud/network/LBHealthCheckPolicyVO.java b/engine/schema/src/main/java/com/cloud/network/LBHealthCheckPolicyVO.java index 22bb2c26b65..ee5f67b09cd 100644 --- a/engine/schema/src/main/java/com/cloud/network/LBHealthCheckPolicyVO.java +++ b/engine/schema/src/main/java/com/cloud/network/LBHealthCheckPolicyVO.java @@ -27,6 +27,7 @@ import javax.persistence.PrimaryKeyJoinColumn; import javax.persistence.Table; import com.cloud.network.rules.HealthCheckPolicy; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "load_balancer_healthcheck_policies") @@ -169,4 +170,11 @@ public class LBHealthCheckPolicyVO implements HealthCheckPolicy { public boolean isDisplay() { return display; } + + @Override + public String toString() { + return String.format("LBHealthCheckPolicy %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "pingPath")); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/as/AutoScalePolicyVO.java b/engine/schema/src/main/java/com/cloud/network/as/AutoScalePolicyVO.java index fa5dcafba34..24d8b8e7f40 100644 --- a/engine/schema/src/main/java/com/cloud/network/as/AutoScalePolicyVO.java +++ b/engine/schema/src/main/java/com/cloud/network/as/AutoScalePolicyVO.java @@ -33,6 +33,7 @@ import javax.persistence.TemporalType; import org.apache.cloudstack.api.InternalIdentity; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "autoscale_policies") @@ -92,7 +93,9 @@ public class AutoScalePolicyVO 
implements AutoScalePolicy, InternalIdentity { @Override public String toString() { - return new StringBuilder("AutoScalePolicy[").append("id-").append(id).append("]").toString(); + return String.format("AutoScalePolicy %s.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmGroupVO.java b/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmGroupVO.java index 652cbb340a3..307de9f1a60 100644 --- a/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmGroupVO.java +++ b/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmGroupVO.java @@ -32,6 +32,7 @@ import javax.persistence.TemporalType; import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.StringUtils; import com.cloud.utils.db.GenericDao; @@ -126,11 +127,9 @@ public class AutoScaleVmGroupVO implements AutoScaleVmGroup, InternalIdentity, I @Override public String toString() { - return new StringBuilder("AutoScaleVmGroupVO[").append("id=").append(id) - .append("|name=").append(name) - .append("|loadBalancerId=").append(loadBalancerId) - .append("|profileId=").append(profileId) - .append("]").toString(); + return String.format("AutoScaleVmGroup %s.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "loadBalancerId", "profileId")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmProfileVO.java b/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmProfileVO.java index 21291062756..562d908507e 100644 --- a/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmProfileVO.java +++ b/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmProfileVO.java @@ -37,6 +37,7 @@ import javax.persistence.Table; import 
org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; @@ -126,7 +127,9 @@ public class AutoScaleVmProfileVO implements AutoScaleVmProfile, Identity, Inter @Override public String toString() { - return new StringBuilder("AutoScaleVMProfileVO[").append("id").append(id).append("-").append("templateId").append("-").append(templateId).append("]").toString(); + return String.format("AutoScaleVMProfile %s.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "templateId")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/as/ConditionVO.java b/engine/schema/src/main/java/com/cloud/network/as/ConditionVO.java index 18e67a4af61..0679dac3235 100644 --- a/engine/schema/src/main/java/com/cloud/network/as/ConditionVO.java +++ b/engine/schema/src/main/java/com/cloud/network/as/ConditionVO.java @@ -33,6 +33,7 @@ import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "conditions") @@ -91,7 +92,9 @@ public class ConditionVO implements Condition, Identity, InternalIdentity { @Override public String toString() { - return new StringBuilder("Condition[").append("id-").append(id).append("]").toString(); + return String.format("Condition %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/as/CounterVO.java b/engine/schema/src/main/java/com/cloud/network/as/CounterVO.java index e5ab9886dda..be21515bb51 100644 --- a/engine/schema/src/main/java/com/cloud/network/as/CounterVO.java +++ 
b/engine/schema/src/main/java/com/cloud/network/as/CounterVO.java @@ -34,6 +34,7 @@ import org.apache.cloudstack.api.InternalIdentity; import com.cloud.network.Network; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "counter") @@ -79,7 +80,9 @@ public class CounterVO implements Counter, Identity, InternalIdentity { @Override public String toString() { - return new StringBuilder("Counter[").append("id-").append(id).append("]").toString(); + return String.format("Counter %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/dao/ExternalLoadBalancerDeviceVO.java b/engine/schema/src/main/java/com/cloud/network/dao/ExternalLoadBalancerDeviceVO.java index 80bec1b8152..88c5c0885a8 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/ExternalLoadBalancerDeviceVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/ExternalLoadBalancerDeviceVO.java @@ -30,6 +30,7 @@ import javax.persistence.Table; import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; import org.apache.cloudstack.network.ExternalNetworkDeviceManager; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; /** * ExternalLoadBalancerDeviceVO contains information on external load balancer devices (F5/Netscaler VPX,MPX,SDX) added into a deployment @@ -244,4 +245,11 @@ public class ExternalLoadBalancerDeviceVO implements InternalIdentity, Identity public void setUuid(String uuid) { this.uuid = uuid; } + + @Override + public String toString() { + return String.format("ExternalLoadBalancerDevice %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "providerName")); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressVO.java 
b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressVO.java index 4c7569a55b9..88e146d2a80 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressVO.java @@ -33,6 +33,7 @@ import javax.persistence.TemporalType; import com.cloud.network.IpAddress; import com.cloud.utils.db.GenericDao; import com.cloud.utils.net.Ip; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; /** * A bean representing a public IP Address @@ -268,7 +269,9 @@ public class IPAddressVO implements IpAddress { @Override public String toString() { - return new StringBuilder("Ip[").append(address).append("-").append(dataCenterId).append("]").toString(); + return String.format("IPAddress %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "dataCenterId", "address")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/dao/LBStickinessPolicyVO.java b/engine/schema/src/main/java/com/cloud/network/dao/LBStickinessPolicyVO.java index e9f50a75a7b..72b8fc151b7 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/LBStickinessPolicyVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/LBStickinessPolicyVO.java @@ -33,6 +33,7 @@ import javax.persistence.Table; import com.cloud.network.rules.StickinessPolicy; import com.cloud.utils.Pair; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "load_balancer_stickiness_policies") @@ -162,4 +163,11 @@ public class LBStickinessPolicyVO implements StickinessPolicy { public boolean isDisplay() { return display; } + + @Override + public String toString() { + return String.format("LBStickinessPolicy %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "methodName")); + } } diff --git 
a/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVO.java b/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVO.java index bd5ea95dcc7..ad0338b9849 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVO.java @@ -27,6 +27,7 @@ import javax.persistence.Table; import com.cloud.network.rules.FirewallRuleVO; import com.cloud.network.rules.LoadBalancer; import com.cloud.utils.net.NetUtils; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; /** * This VO represents Public Load Balancer @@ -136,4 +137,11 @@ public class LoadBalancerVO extends FirewallRuleVO implements LoadBalancer { public String getCidrList() { return cidrList; } + + @Override + public String toString() { + return String.format("LoadBalancer %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "purpose", "state")); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkServiceProviderVO.java b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkServiceProviderVO.java index 415b513b405..9557c7465bf 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkServiceProviderVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkServiceProviderVO.java @@ -35,6 +35,7 @@ import org.apache.cloudstack.api.InternalIdentity; import com.cloud.network.Network.Service; import com.cloud.network.PhysicalNetworkServiceProvider; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "physical_network_service_providers") @@ -109,6 +110,13 @@ public class PhysicalNetworkServiceProviderVO implements PhysicalNetworkServiceP this.uuid = UUID.randomUUID().toString(); } + + @Override + public String toString() { + return 
String.format("PhysicalNetworkServiceProvider %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "providerName")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkVO.java b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkVO.java index 52ebe7596a4..68e023984a0 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkVO.java @@ -37,6 +37,7 @@ import com.cloud.network.PhysicalNetwork; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; /** * NetworkConfigurationVO contains information about a specific physical network. @@ -248,4 +249,11 @@ public class PhysicalNetworkVO implements PhysicalNetwork { public String getName() { return name; } + + @Override + public String toString() { + return String.format("PhysicalNetwork %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnVO.java b/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnVO.java index 95e3693a99c..2439ea55b4a 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnVO.java @@ -18,6 +18,7 @@ package com.cloud.network.dao; import com.cloud.network.RemoteAccessVpn; import com.cloud.utils.db.Encrypt; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Entity; @@ -86,6 +87,11 @@ public class RemoteAccessVpnVO implements RemoteAccessVpn { this.vpcId = vpcId; } + @Override + public String 
toString() { + return String.format("RemoteAccessVpn %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid")); + } + @Override public State getState() { return state; diff --git a/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteCustomerGatewayVO.java b/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteCustomerGatewayVO.java index 52741fdd9a5..e5394238c31 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteCustomerGatewayVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteCustomerGatewayVO.java @@ -29,6 +29,7 @@ import javax.persistence.Table; import com.cloud.network.Site2SiteCustomerGateway; import com.cloud.utils.db.Encrypt; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @@ -110,6 +111,13 @@ public class Site2SiteCustomerGatewayVO implements Site2SiteCustomerGateway { this.ikeVersion = ikeVersion; } + @Override + public String toString() { + return String.format("Site2SiteCustomerGateway %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnConnectionVO.java b/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnConnectionVO.java index b032966dd5a..4d6bee5c861 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnConnectionVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnConnectionVO.java @@ -32,6 +32,7 @@ import org.apache.cloudstack.api.InternalIdentity; import com.cloud.network.Site2SiteVpnConnection; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @@ -182,4 +183,11 @@ public class Site2SiteVpnConnectionVO implements Site2SiteVpnConnection, Interna public String 
getName() { return null; } + + @Override + public String toString() { + return String.format("Site2SiteVpnConnection %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "state")); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnGatewayVO.java b/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnGatewayVO.java index 703c78c7b86..a5eb7efce23 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnGatewayVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnGatewayVO.java @@ -28,6 +28,7 @@ import javax.persistence.Table; import com.cloud.network.Site2SiteVpnGateway; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @@ -70,6 +71,13 @@ public class Site2SiteVpnGatewayVO implements Site2SiteVpnGateway { this.domainId = domainId; } + @Override + public String toString() { + return String.format("Site2SiteVpnGateway %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/network/rules/FirewallRuleVO.java b/engine/schema/src/main/java/com/cloud/network/rules/FirewallRuleVO.java index 07b25e7a28c..1dfdc5093a5 100644 --- a/engine/schema/src/main/java/com/cloud/network/rules/FirewallRuleVO.java +++ b/engine/schema/src/main/java/com/cloud/network/rules/FirewallRuleVO.java @@ -36,6 +36,7 @@ import javax.persistence.Transient; import com.cloud.utils.db.GenericDao; import com.cloud.utils.net.NetUtils; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "firewall_rules") @@ -258,7 +259,9 @@ public class FirewallRuleVO implements FirewallRule { @Override public String toString() { - return new 
StringBuilder("Rule[").append(id).append("-").append(purpose).append("-").append(state).append("]").toString(); + return String.format("FirewallRule %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "networkId", "purpose", "state")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupRuleVO.java b/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupRuleVO.java index 1980cd33d14..325a6efc867 100644 --- a/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupRuleVO.java +++ b/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupRuleVO.java @@ -16,6 +16,8 @@ // under the License. package com.cloud.network.security; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; + import java.util.UUID; import javax.persistence.Column; @@ -89,6 +91,13 @@ public class SecurityGroupRuleVO implements SecurityRule { } } + @Override + public String toString() { + return String.format("SecurityGroupRule %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "type")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupVO.java b/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupVO.java index ec1cfae43b6..940baaad18d 100644 --- a/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupVO.java +++ b/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupVO.java @@ -16,6 +16,8 @@ // under the License. 
package com.cloud.network.security; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; + import java.util.UUID; import javax.persistence.Column; @@ -60,6 +62,13 @@ public class SecurityGroupVO implements SecurityGroup { uuid = UUID.randomUUID().toString(); } + @Override + public String toString() { + return String.format("SecurityGroup %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLItemVO.java b/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLItemVO.java index f28b3125a09..4333d35d473 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLItemVO.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLItemVO.java @@ -35,6 +35,7 @@ import javax.persistence.Transient; import com.cloud.utils.db.GenericDao; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "network_acl_item") @@ -168,7 +169,9 @@ public class NetworkACLItemVO implements NetworkACLItem, Cloneable { @Override public String toString() { - return new StringBuilder("Rule[").append(id).append("-").append("NetworkACL").append("-").append(state).append("]").toString(); + return String.format("NetworkACLItem %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "aclId", "state")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLVO.java b/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLVO.java index 280d5dfaf4b..37b9e7ff296 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLVO.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLVO.java @@ -89,7 +89,7 @@ public class 
NetworkACLVO implements NetworkACL { @Override public String toString() { - return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "uuid", "name", "vpcId"); + return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "name", "vpcId"); } public void setUuid(String uuid) { diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/VpcGatewayVO.java b/engine/schema/src/main/java/com/cloud/network/vpc/VpcGatewayVO.java index 72f6a89e70f..b1d4df35d4c 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/VpcGatewayVO.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/VpcGatewayVO.java @@ -29,6 +29,7 @@ import javax.persistence.Id; import javax.persistence.Table; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "vpc_gateways") @@ -163,9 +164,9 @@ public class VpcGatewayVO implements VpcGateway { @Override public String toString() { - StringBuilder buf = new StringBuilder("VpcGateway["); - buf.append(id).append("|").append(ip4Address.toString()).append("|").append(vpcId).append("]"); - return buf.toString(); + return String.format("VpcGateway %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "vpcId", "ip4Address")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/VpcOfferingVO.java b/engine/schema/src/main/java/com/cloud/network/vpc/VpcOfferingVO.java index 41254ba4a8b..274b9fedecc 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/VpcOfferingVO.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/VpcOfferingVO.java @@ -30,6 +30,7 @@ import javax.persistence.Table; import com.cloud.offering.NetworkOffering; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "vpc_offerings") @@ -180,8 +181,9 @@ public class 
VpcOfferingVO implements VpcOffering { @Override public String toString() { - StringBuilder buf = new StringBuilder("[VPC Offering ["); - return buf.append(id).append("-").append(name).append("]").toString(); + return String.format("VPCOffering %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); } public void setName(String name) { diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java b/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java index 27d8227284b..e8ccc2ebcf1 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java @@ -28,6 +28,7 @@ import javax.persistence.Table; import javax.persistence.Transient; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "vpc") @@ -210,8 +211,9 @@ public class VpcVO implements Vpc { @Override public String toString() { - final StringBuilder buf = new StringBuilder("[VPC ["); - return buf.append(id).append("-").append(name).append("]").toString(); + return String.format("VPC %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/offerings/NetworkOfferingVO.java b/engine/schema/src/main/java/com/cloud/offerings/NetworkOfferingVO.java index 0bf110757d7..5cad366945f 100644 --- a/engine/schema/src/main/java/com/cloud/offerings/NetworkOfferingVO.java +++ b/engine/schema/src/main/java/com/cloud/offerings/NetworkOfferingVO.java @@ -32,6 +32,7 @@ import com.cloud.network.Network; import com.cloud.network.Networks.TrafficType; import com.cloud.offering.NetworkOffering; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "network_offerings") @@ -471,8 +472,8 @@ public 
class NetworkOfferingVO implements NetworkOffering { @Override public String toString() { - StringBuilder buf = new StringBuilder("[Network Offering ["); - return buf.append(id).append("-").append(trafficType).append("-").append(name).append("]").toString(); + return String.format("NetworkOffering %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "trafficType")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/projects/ProjectInvitationVO.java b/engine/schema/src/main/java/com/cloud/projects/ProjectInvitationVO.java index 36e772edd3a..887939311b2 100644 --- a/engine/schema/src/main/java/com/cloud/projects/ProjectInvitationVO.java +++ b/engine/schema/src/main/java/com/cloud/projects/ProjectInvitationVO.java @@ -29,6 +29,7 @@ import javax.persistence.Id; import javax.persistence.Table; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "project_invitations") @@ -127,9 +128,9 @@ public class ProjectInvitationVO implements ProjectInvitation { @Override public String toString() { - StringBuilder buf = new StringBuilder("ProjectInvitation["); - buf.append(id).append("|projectId=").append(projectId).append("|accountId=").append(forAccountId).append("]"); - return buf.toString(); + return String.format("ProjectInvitation %s.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "projectId", "forAccountId")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/projects/ProjectVO.java b/engine/schema/src/main/java/com/cloud/projects/ProjectVO.java index c8faa00812c..4ac34eeab4c 100644 --- a/engine/schema/src/main/java/com/cloud/projects/ProjectVO.java +++ b/engine/schema/src/main/java/com/cloud/projects/ProjectVO.java @@ -117,7 +117,9 @@ public class ProjectVO implements Project, Identity, InternalIdentity { @Override public String toString() { - return 
String.format("Project %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "name", "uuid", "domainId")); + return String.format("Project %s.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "domainId")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/storage/BucketVO.java b/engine/schema/src/main/java/com/cloud/storage/BucketVO.java index 53017447c07..a54c1dd9b08 100644 --- a/engine/schema/src/main/java/com/cloud/storage/BucketVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/BucketVO.java @@ -19,8 +19,7 @@ package com.cloud.storage; import com.cloud.utils.db.GenericDao; import com.google.gson.annotations.Expose; import org.apache.cloudstack.storage.object.Bucket; -import org.apache.commons.lang3.builder.ToStringBuilder; -import org.apache.commons.lang3.builder.ToStringStyle; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Entity; @@ -257,7 +256,8 @@ public class BucketVO implements Bucket { @Override public String toString() { - return String.format("Bucket %s", new ToStringBuilder(this, ToStringStyle.JSON_STYLE).append("uuid", getUuid()).append("name", getName()) - .append("ObjectStoreId", getObjectStoreId()).toString()); + return String.format("Bucket %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "objectStoreId")); } } diff --git a/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java b/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java index b4f112f98e8..79f5bcb5157 100644 --- a/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java @@ -34,6 +34,7 @@ import javax.persistence.Transient; import com.cloud.offering.DiskOffering; import com.cloud.utils.db.GenericDao; +import 
org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "disk_offering") @@ -588,4 +589,11 @@ public class DiskOfferingVO implements DiskOffering { public void setDiskSizeStrictness(boolean diskSizeStrictness) { this.diskSizeStrictness = diskSizeStrictness; } + + @Override + public String toString() { + return String.format("DiskOffering %s.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } } diff --git a/engine/schema/src/main/java/com/cloud/storage/SnapshotPolicyVO.java b/engine/schema/src/main/java/com/cloud/storage/SnapshotPolicyVO.java index c7848586826..f57d9d3dccf 100644 --- a/engine/schema/src/main/java/com/cloud/storage/SnapshotPolicyVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/SnapshotPolicyVO.java @@ -27,6 +27,7 @@ import javax.persistence.Table; import com.cloud.storage.snapshot.SnapshotPolicy; import com.cloud.utils.DateUtil.IntervalType; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "snapshot_policy") @@ -76,6 +77,13 @@ public class SnapshotPolicyVO implements SnapshotPolicy { this.uuid = UUID.randomUUID().toString(); } + @Override + public String toString() { + return String.format("SnapshotPolicy %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "volumeId")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/storage/SnapshotScheduleVO.java b/engine/schema/src/main/java/com/cloud/storage/SnapshotScheduleVO.java index 86e0da53666..5e013e76d3c 100644 --- a/engine/schema/src/main/java/com/cloud/storage/SnapshotScheduleVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/SnapshotScheduleVO.java @@ -29,8 +29,7 @@ import javax.persistence.Temporal; import javax.persistence.TemporalType; import com.cloud.storage.snapshot.SnapshotSchedule; -import 
org.apache.commons.lang3.builder.ReflectionToStringBuilder; -import org.apache.commons.lang3.builder.ToStringStyle; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "snapshot_schedule") @@ -73,6 +72,13 @@ public class SnapshotScheduleVO implements SnapshotSchedule { this.asyncJobId = null; } + @Override + public String toString() { + return String.format("SnapshotSchedule %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "volumeId", "policyId")); + } + @Override public long getId() { return id; @@ -134,11 +140,4 @@ public class SnapshotScheduleVO implements SnapshotSchedule { public void setUuid(String uuid) { this.uuid = uuid; } - - @Override - public String toString() { - ReflectionToStringBuilder reflectionToStringBuilder = new ReflectionToStringBuilder(this, ToStringStyle.JSON_STYLE); - reflectionToStringBuilder.setExcludeFieldNames("id"); - return reflectionToStringBuilder.toString(); - } } diff --git a/engine/schema/src/main/java/com/cloud/storage/SnapshotVO.java b/engine/schema/src/main/java/com/cloud/storage/SnapshotVO.java index 39d2cdd0b77..19c67a91e2c 100644 --- a/engine/schema/src/main/java/com/cloud/storage/SnapshotVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/SnapshotVO.java @@ -30,12 +30,11 @@ import javax.persistence.Id; import javax.persistence.Table; import org.apache.cloudstack.util.HypervisorTypeConverter; -import org.apache.commons.lang3.builder.ToStringBuilder; -import org.apache.commons.lang3.builder.ToStringStyle; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.utils.db.GenericDao; import com.google.gson.annotations.Expose; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "snapshots") @@ -283,7 +282,8 @@ public class SnapshotVO implements Snapshot { @Override public String toString() { - return String.format("Snapshot %s", new 
ToStringBuilder(this, ToStringStyle.JSON_STYLE).append("uuid", getUuid()).append("name", getName()) - .append("volumeId", getVolumeId()).toString()); + return String.format("Snapshot %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "volumeId", "version")); } } diff --git a/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java b/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java index 9dc9734f8ab..10d08601515 100644 --- a/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java @@ -572,7 +572,9 @@ public class VMTemplateVO implements VirtualMachineTemplate { @Override public String toString() { - return String.format("Template %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uniqueName", "format")); + return String.format("Template %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "uniqueName", "format")); } public void setRemoved(Date removed) { diff --git a/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java b/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java index ea57ef91237..653be54a910 100644 --- a/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java @@ -514,7 +514,9 @@ public class VolumeVO implements Volume { @Override public String toString() { - return new StringBuilder("Vol[").append(id).append("|name=").append(name).append("|vm=").append(instanceId).append("|").append(volumeType).append("]").toString(); + return String.format("Volume %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "volumeType", "instanceId")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41700to41710.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41700to41710.java index 
e3eb2bf514d..266401e0c31 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41700to41710.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41700to41710.java @@ -23,12 +23,16 @@ import java.util.List; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDaoImpl; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.commons.collections.CollectionUtils; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.dao.VolumeDaoImpl; import com.cloud.upgrade.SystemVmTemplateRegistration; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.exception.CloudRuntimeException; public class Upgrade41700to41710 extends DbUpgradeAbstractImpl implements DbUpgradeSystemVmTemplate { @@ -95,24 +99,58 @@ public class Upgrade41700to41710 extends DbUpgradeAbstractImpl implements DbUpgr } } - private void updateStorPoolStorageType() { - storageDao = new PrimaryDataStoreDaoImpl(); - List storPoolPools = storageDao.findPoolsByProvider("StorPool"); - for (StoragePoolVO storagePoolVO : storPoolPools) { - if (StoragePoolType.SharedMountPoint == storagePoolVO.getPoolType()) { - storagePoolVO.setPoolType(StoragePoolType.StorPool); - storageDao.update(storagePoolVO.getId(), storagePoolVO); - } - updateStorageTypeForStorPoolVolumes(storagePoolVO.getId()); + protected PrimaryDataStoreDao getStorageDao() { + if (storageDao == null) { + storageDao = new PrimaryDataStoreDaoImpl(); } + return storageDao; } - private void updateStorageTypeForStorPoolVolumes(long storagePoolId) { - volumeDao = new VolumeDaoImpl(); - List volumes = volumeDao.findByPoolId(storagePoolId, null); - for (VolumeVO volumeVO : volumes) { - volumeVO.setPoolType(StoragePoolType.StorPool); - 
volumeDao.update(volumeVO.getId(), volumeVO); + protected VolumeDao getVolumeDao() { + if (volumeDao == null) { + volumeDao = new VolumeDaoImpl(); } + return volumeDao; + } + + /* + GenericDao.customSearch using GenericSearchBuilder and GenericDao.update using + GenericDao.createSearchBuilder used here to prevent any future issues when new fields + are added to StoragePoolVO or VolumeVO and this upgrade path starts to fail. + */ + protected void updateStorPoolStorageType() { + StoragePoolVO pool = getStorageDao().createForUpdate(); + pool.setPoolType(StoragePoolType.StorPool); + SearchBuilder sb = getStorageDao().createSearchBuilder(); + sb.and("provider", sb.entity().getStorageProviderName(), SearchCriteria.Op.EQ); + sb.and("type", sb.entity().getPoolType(), SearchCriteria.Op.EQ); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("provider", StoragePoolType.StorPool.name()); + sc.setParameters("type", StoragePoolType.SharedMountPoint.name()); + getStorageDao().update(pool, sc); + + GenericSearchBuilder gSb = getStorageDao().createSearchBuilder(Long.class); + gSb.selectFields(gSb.entity().getId()); + gSb.and("provider", gSb.entity().getStorageProviderName(), SearchCriteria.Op.EQ); + gSb.done(); + SearchCriteria gSc = gSb.create(); + gSc.setParameters("provider", StoragePoolType.StorPool.name()); + List poolIds = getStorageDao().customSearch(gSc, null); + updateStorageTypeForStorPoolVolumes(poolIds); + } + + protected void updateStorageTypeForStorPoolVolumes(List storagePoolIds) { + if (CollectionUtils.isEmpty(storagePoolIds)) { + return; + } + VolumeVO volume = getVolumeDao().createForUpdate(); + volume.setPoolType(StoragePoolType.StorPool); + SearchBuilder sb = getVolumeDao().createSearchBuilder(); + sb.and("poolId", sb.entity().getPoolId(), SearchCriteria.Op.IN); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("poolId", storagePoolIds.toArray()); + getVolumeDao().update(volume, sc); } } diff --git 
a/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java b/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java index 1da7d52a366..d204f67dc93 100644 --- a/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java +++ b/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java @@ -36,6 +36,7 @@ import org.apache.cloudstack.api.InternalIdentity; import com.cloud.utils.db.Encrypt; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.StringUtils; @Entity @@ -130,6 +131,12 @@ public class UserAccountVO implements UserAccount, InternalIdentity { public UserAccountVO() { } + @Override + public String toString() { + return String.format("UserAccount %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields + (this, "id", "uuid", "username", "accountName")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/user/UserVO.java b/engine/schema/src/main/java/com/cloud/user/UserVO.java index 7dac26429ac..6e355e102e6 100644 --- a/engine/schema/src/main/java/com/cloud/user/UserVO.java +++ b/engine/schema/src/main/java/com/cloud/user/UserVO.java @@ -296,7 +296,7 @@ public class UserVO implements User, Identity, InternalIdentity { @Override public String toString() { - return String.format("User %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "username", "uuid")); + return String.format("User %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "username")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/user/dao/AccountDaoImpl.java b/engine/schema/src/main/java/com/cloud/user/dao/AccountDaoImpl.java index f9ef5c40eba..2654b22374f 100644 --- a/engine/schema/src/main/java/com/cloud/user/dao/AccountDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/user/dao/AccountDaoImpl.java @@ -310,7 +310,7 @@ public class 
AccountDaoImpl extends GenericDaoBase implements A if (!account.getNeedsCleanup()) { account.setNeedsCleanup(true); if (!update(accountId, account)) { - logger.warn("Failed to mark account id=" + accountId + " for cleanup"); + logger.warn("Failed to mark account {} for cleanup", account); } } } diff --git a/engine/schema/src/main/java/com/cloud/vm/InstanceGroupVO.java b/engine/schema/src/main/java/com/cloud/vm/InstanceGroupVO.java index 4437af29bc1..d5bd8c5aaae 100644 --- a/engine/schema/src/main/java/com/cloud/vm/InstanceGroupVO.java +++ b/engine/schema/src/main/java/com/cloud/vm/InstanceGroupVO.java @@ -32,6 +32,7 @@ import javax.persistence.Table; import com.cloud.user.Account; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "instance_group") @@ -74,6 +75,12 @@ public class InstanceGroupVO implements InstanceGroup { super(); } + @Override + public String toString() { + return String.format("InstanceGroup %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "name")); + } + + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/vm/NicVO.java b/engine/schema/src/main/java/com/cloud/vm/NicVO.java index 936efd112b7..6c569e22dd9 100644 --- a/engine/schema/src/main/java/com/cloud/vm/NicVO.java +++ b/engine/schema/src/main/java/com/cloud/vm/NicVO.java @@ -330,7 +330,10 @@ public class NicVO implements Nic { @Override public String toString() { - return String.format("Nic %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "instanceId", "deviceId", "broadcastUri", "reservationId", "iPv4Address")); + return String.format("Nic %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "instanceId", + "deviceId", "broadcastUri", "reservationId", "iPv4Address")); } @Override diff --git 
a/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpVO.java b/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpVO.java index 093434052bc..4c8208b4be8 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpVO.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpVO.java @@ -28,6 +28,7 @@ import javax.persistence.Table; import com.cloud.utils.db.GenericDao; import com.cloud.vm.NicSecondaryIp; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "nic_secondary_ips") @@ -87,6 +88,14 @@ public class NicSecondaryIpVO implements NicSecondaryIp { @Column(name = "vmId") long vmId; + @Override + public String toString() { + return String.format("NicSecondaryIp %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "vmId", + "nicId", "ip4Address", "ip6Address", "networkId")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java index 744518ba743..0e87e6bcb7d 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -109,7 +109,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem @Inject VolumeDao volumeDao; @Inject - HostDao hostDao; + protected HostDao hostDao; protected Attribute _updateTimeAttr; @@ -140,8 +140,6 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem private static final String COUNT_VMS_BY_ZONE_AND_STATE_AND_HOST_TAG = "SELECT COUNT(1) FROM vm_instance vi JOIN service_offering so ON vi.service_offering_id=so.id " + "JOIN vm_template vt ON vi.vm_template_id = vt.id WHERE vi.data_center_id = ? AND vi.state = ? AND vi.removed IS NULL AND (so.host_tag = ? 
OR vt.template_tag = ?)"; - @Inject - protected HostDao _hostDao; public VMInstanceDaoImpl() { } @@ -155,13 +153,13 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem IdStatesSearch.done(); VMClusterSearch = createSearchBuilder(); - SearchBuilder hostSearch = _hostDao.createSearchBuilder(); + SearchBuilder hostSearch = hostDao.createSearchBuilder(); VMClusterSearch.join("hostSearch", hostSearch, hostSearch.entity().getId(), VMClusterSearch.entity().getHostId(), JoinType.INNER); hostSearch.and("clusterId", hostSearch.entity().getClusterId(), SearchCriteria.Op.EQ); VMClusterSearch.done(); LHVMClusterSearch = createSearchBuilder(); - SearchBuilder hostSearch1 = _hostDao.createSearchBuilder(); + SearchBuilder hostSearch1 = hostDao.createSearchBuilder(); LHVMClusterSearch.join("hostSearch1", hostSearch1, hostSearch1.entity().getId(), LHVMClusterSearch.entity().getLastHostId(), JoinType.INNER); LHVMClusterSearch.and("hostid", LHVMClusterSearch.entity().getHostId(), Op.NULL); hostSearch1.and("clusterId", hostSearch1.entity().getClusterId(), SearchCriteria.Op.EQ); @@ -577,13 +575,13 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem logger.debug(str.toString()); } else { - logger.debug("Unable to update the vm id=" + vm.getId() + "; the vm either doesn't exist or already removed"); + logger.debug("Unable to update the vm {}; the vm either doesn't exist or already removed", vm); } } if (vo != null && vo.getState() == newState) { // allow for concurrent update if target state has already been matched - logger.debug("VM " + vo.getInstanceName() + " state has been already been updated to " + newState); + logger.debug("VM {} state has been already been updated to {}", vo, newState); return true; } } @@ -954,8 +952,10 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem State instanceState = instance.getState(); if ((powerState == VirtualMachine.PowerState.PowerOff && instanceState == State.Running) || (powerState == 
VirtualMachine.PowerState.PowerOn && instanceState == State.Stopped)) { - logger.debug(String.format("VM id: %d on host id: %d and power host id: %d is in %s state, but power state is %s", - instance.getId(), instance.getHostId(), powerHostId, instanceState, powerState)); + HostVO instanceHost = hostDao.findById(instance.getHostId()); + HostVO powerHost = powerHostId == instance.getHostId() ? instanceHost : hostDao.findById(powerHostId); + logger.debug("VM: {} on host: {} and power host : {} is in {} state, but power state is {}", + instance, instanceHost, powerHost, instanceState, powerState); return false; } return true; diff --git a/engine/schema/src/main/java/com/cloud/vm/snapshot/VMSnapshotVO.java b/engine/schema/src/main/java/com/cloud/vm/snapshot/VMSnapshotVO.java index c48396ad021..5b6f97b82e7 100644 --- a/engine/schema/src/main/java/com/cloud/vm/snapshot/VMSnapshotVO.java +++ b/engine/schema/src/main/java/com/cloud/vm/snapshot/VMSnapshotVO.java @@ -36,6 +36,7 @@ import javax.persistence.Transient; import org.apache.cloudstack.engine.subsystem.api.storage.VMSnapshotOptions; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "vm_snapshots") @@ -145,6 +146,13 @@ public class VMSnapshotVO implements VMSnapshot { this.serviceOfferingId = serviceOfferingId; } + @Override + public String toString() { + return String.format("VMSnapshot %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "vmId")); + } + @Override public String getDescription() { return description; diff --git a/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java index ab8f5f2cd84..03a978f8546 100644 --- a/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java +++ 
b/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java @@ -176,7 +176,7 @@ public class VMSnapshotDaoImpl extends GenericDaoBase implem .append("; updatedTime=") .append(oldUpdatedTime); } else { - logger.debug("Unable to update VM snapshot: id=" + vo.getId() + ", as there is no such snapshot exists in the database anymore"); + logger.debug("Unable to update VM snapshot: {}, as no such snapshot exists in the database anymore", vo); } } return rows > 0; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/acl/RoleVO.java b/engine/schema/src/main/java/org/apache/cloudstack/acl/RoleVO.java index 084df29fa42..cff139a9263 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/acl/RoleVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/acl/RoleVO.java @@ -126,7 +126,8 @@ public class RoleVO implements Role { @Override public String toString() { - return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "name", "uuid", "roleType"); + return String.format("Role %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "roleType")); } public boolean isPublicRole() { diff --git a/engine/schema/src/main/java/org/apache/cloudstack/affinity/AffinityGroupVO.java b/engine/schema/src/main/java/org/apache/cloudstack/affinity/AffinityGroupVO.java index 536b96c6567..9b8fc598171 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/affinity/AffinityGroupVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/affinity/AffinityGroupVO.java @@ -28,6 +28,7 @@ import javax.persistence.Id; import javax.persistence.Table; import org.apache.cloudstack.acl.ControlledEntity; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "affinity_group") @@ -119,9 +120,8 @@ public class AffinityGroupVO implements AffinityGroup { @Override public String toString() { - StringBuilder buf = new
StringBuilder("AffinityGroup["); - buf.append(uuid).append("]"); - return buf.toString(); + return String.format("AffinityGroup %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); } @Override diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java index ba31dc59d39..fd3c0be18d2 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java @@ -29,6 +29,7 @@ import javax.persistence.Temporal; import javax.persistence.TemporalType; import com.cloud.utils.DateUtil; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "backup_schedule") @@ -68,6 +69,12 @@ public class BackupScheduleVO implements BackupSchedule { this.scheduledTimestamp = scheduledTimestamp; } + @Override + public String toString() { + return String.format("BackupSchedule %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "vmId", "schedule", "scheduleType")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java index 9b285e66cab..b4cd2f7bada 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.backup; import com.cloud.utils.db.GenericDao; import com.google.gson.Gson; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.StringUtils; import java.util.Arrays; @@ -94,6 +95,12 @@ public class BackupVO implements Backup { this.uuid = UUID.randomUUID().toString(); } + 
@Override + public String toString() { + return String.format("Backup %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "vmId", "backupType", "externalId")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanMigrationVO.java b/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanMigrationVO.java index eab2e555d69..6afc2e7707a 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanMigrationVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanMigrationVO.java @@ -20,6 +20,7 @@ package org.apache.cloudstack.cluster; import org.apache.cloudstack.jobs.JobInfo; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Entity; @@ -66,6 +67,13 @@ public class ClusterDrsPlanMigrationVO implements ClusterDrsPlanMigration { } + @Override + public String toString() { + return String.format("ClusterDrsPlanMigration %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "planId", "vmId", "jobId")); + } + public long getId() { return id; } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanVO.java b/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanVO.java index 0ce25ae90fe..68f7fe4b44e 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanVO.java @@ -20,6 +20,7 @@ package org.apache.cloudstack.cluster; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Entity; @@ -68,6 +69,13 @@ public class ClusterDrsPlanVO implements ClusterDrsPlan { uuid = 
UUID.randomUUID().toString(); } + @Override + public String toString() { + return String.format("ClusterDrsPlan %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "clusterId")); + } + public long getId() { return id; } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadCertificateVO.java b/engine/schema/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadCertificateVO.java index 36aefa201f3..3c35f59659f 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadCertificateVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadCertificateVO.java @@ -18,6 +18,7 @@ package org.apache.cloudstack.direct.download; import com.cloud.hypervisor.Hypervisor; import org.apache.cloudstack.util.HypervisorTypeConverter; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Convert; @@ -57,6 +58,13 @@ public class DirectDownloadCertificateVO implements DirectDownloadCertificate { this.uuid = UUID.randomUUID().toString(); } + @Override + public String toString() { + return String.format("DirectDownloadCertificate %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "alias")); + } + public void setId(Long id) { this.id = id; } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/db/VMEntityVO.java b/engine/schema/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/db/VMEntityVO.java index af48e5e04ac..917f8bb800a 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/db/VMEntityVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/db/VMEntityVO.java @@ -50,6 +50,7 @@ import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.State; import com.google.gson.Gson; import 
org.apache.cloudstack.util.HypervisorTypeConverter; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "vm_instance") @@ -460,7 +461,7 @@ public class VMEntityVO implements VirtualMachine, FiniteStateObject details) { this.details = details; } + + @Override + public String toString() { + return String.format("ObjectStore %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "providerName")); + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java index 707091adb87..c2f5d0a5d96 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java @@ -26,6 +26,7 @@ import com.cloud.utils.UriUtils; import com.cloud.utils.db.Encrypt; import com.cloud.utils.db.GenericDao; import org.apache.cloudstack.util.HypervisorTypeConverter; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Convert; @@ -118,6 +119,9 @@ public class StoragePoolVO implements StoragePool { @Column(name = "capacity_iops", updatable = true, nullable = true) private Long capacityIops; + @Column(name = "used_iops", updatable = true, nullable = true) + private Long usedIops; + @Column(name = "hypervisor") @Convert(converter = HypervisorTypeConverter.class) private HypervisorType hypervisor; @@ -255,6 +259,14 @@ public class StoragePoolVO implements StoragePool { return capacityIops; } + public Long getUsedIops() { + return usedIops; + } + + public void setUsedIops(Long usedIops) { + this.usedIops = usedIops; + } + @Override public Long getClusterId() { return clusterId; @@ -370,7 +382,7 @@ public class StoragePoolVO 
implements StoragePool { @Override public String toString() { - return new StringBuilder("Pool[").append(id).append("|").append(poolType).append("]").toString(); + return String.format("StoragePool %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "name", "poolType")); } @Override diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSVO.java index 3b869a5429f..8870bf6d4d8 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSVO.java @@ -23,6 +23,7 @@ import java.util.Date; import java.util.UUID; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Entity; @@ -120,6 +121,13 @@ public class SharedFSVO implements SharedFS { this.uuid = UUID.randomUUID().toString(); } + @Override + public String toString() { + return String.format("SharedFS %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } + @Override public Class getEntityType() { return SharedFS.class; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleVO.java b/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleVO.java index 176f88c5f6b..e0065db1e77 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleVO.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.vm.schedule; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Entity; @@ -95,6 +96,11 @@ 
public class VMScheduleVO implements VMSchedule { this.enabled = enabled; } + @Override + public String toString() { + return String.format("VMSchedule %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "action", "description")); + } + @Override public String getUuid() { return uuid; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduledJobVO.java b/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduledJobVO.java index 0c2dd94cce5..775e9cfe40c 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduledJobVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduledJobVO.java @@ -18,6 +18,8 @@ */ package org.apache.cloudstack.vm.schedule; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; + import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.EnumType; @@ -71,6 +73,14 @@ public class VMScheduledJobVO implements VMScheduledJob { this.scheduledTime = scheduledTime; } + + @Override + public String toString() { + return String.format("VMScheduledJob %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "action", "vmScheduleId", "vmId", "asyncJobId")); + } + @Override public String getUuid() { return uuid; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42000to42010.sql b/engine/schema/src/main/resources/META-INF/db/schema-42000to42010.sql index aef99dd0c7f..8b70cce3404 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-42000to42010.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-42000to42010.sql @@ -32,3 +32,6 @@ CALL `cloud`.`IDEMPOTENT_ADD_FOREIGN_KEY`('cloud.mshost_peer', 'fk_mshost_peer__ -- Add last_id to the volumes table CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.volumes', 'last_id', 'bigint(20) unsigned DEFAULT NULL'); + +-- Add used_iops column to support IOPS data 
in storage stats +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.storage_pool', 'used_iops', 'bigint unsigned DEFAULT NULL COMMENT "IOPS currently in use for this storage pool" '); diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.storage_pool_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.storage_pool_view.sql index e6cc9458208..5d7585baa3b 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.storage_pool_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.storage_pool_view.sql @@ -31,7 +31,9 @@ SELECT `storage_pool`.`created` AS `created`, `storage_pool`.`removed` AS `removed`, `storage_pool`.`capacity_bytes` AS `capacity_bytes`, + `storage_pool`.`used_bytes` AS `used_bytes`, `storage_pool`.`capacity_iops` AS `capacity_iops`, + `storage_pool`.`used_iops` AS `used_iops`, `storage_pool`.`scope` AS `scope`, `storage_pool`.`hypervisor` AS `hypervisor`, `storage_pool`.`storage_provider_name` AS `storage_provider_name`, diff --git a/engine/schema/src/test/java/com/cloud/upgrade/dao/Upgrade41700to41710Test.java b/engine/schema/src/test/java/com/cloud/upgrade/dao/Upgrade41700to41710Test.java new file mode 100644 index 00000000000..ad7c0cede25 --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/upgrade/dao/Upgrade41700to41710Test.java @@ -0,0 +1,123 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.upgrade.dao; + +import java.util.Collections; +import java.util.List; + +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDaoImpl; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.storage.Storage; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeDaoImpl; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class Upgrade41700to41710Test { + @Spy + Upgrade41700to41710 upgrade41700to41710; + + @Test + public void testGetStorageDao_FirstInvocationCreatesInstance() { + PrimaryDataStoreDao dao1 = upgrade41700to41710.getStorageDao(); + Assert.assertNotNull(dao1); + Assert.assertTrue(dao1 instanceof PrimaryDataStoreDaoImpl); + } + + @Test + public void testGetStorageDao_SubsequentInvocationReturnsSameInstance() { + PrimaryDataStoreDao dao1 = upgrade41700to41710.getStorageDao(); + PrimaryDataStoreDao dao2 = upgrade41700to41710.getStorageDao(); + Assert.assertSame(dao1, dao2); + } + + @Test + public void testGetVolumeDao_FirstInvocationCreatesInstance() { + VolumeDao dao1 = upgrade41700to41710.getVolumeDao(); + Assert.assertNotNull(dao1); + 
Assert.assertTrue(dao1 instanceof VolumeDaoImpl); + } + + @Test + public void testGetVolumeDao_SubsequentInvocationReturnsSameInstance() { + VolumeDao dao1 = upgrade41700to41710.getVolumeDao(); + VolumeDao dao2 = upgrade41700to41710.getVolumeDao(); + Assert.assertSame(dao1, dao2); + } + + @Test + public void testUpdateStorPoolStorageType_WithPoolIds() { + PrimaryDataStoreDao storageDao = Mockito.mock(PrimaryDataStoreDao.class); + Mockito.doReturn(storageDao).when(upgrade41700to41710).getStorageDao(); + StoragePoolVO pool = Mockito.mock(StoragePoolVO.class); + SearchBuilder searchBuilder = Mockito.mock(SearchBuilder.class); + Mockito.when(storageDao.createSearchBuilder()).thenReturn(searchBuilder); + Mockito.when(searchBuilder.entity()).thenReturn(pool); + Mockito.when(searchBuilder.create()).thenReturn(Mockito.mock(SearchCriteria.class)); + GenericSearchBuilder gSb = Mockito.mock(GenericSearchBuilder.class); + Mockito.doReturn(gSb).when(storageDao).createSearchBuilder(Mockito.any()); + Mockito.when(gSb.create()).thenReturn(Mockito.mock(SearchCriteria.class)); + Mockito.when(gSb.entity()).thenReturn(pool); + Mockito.when(storageDao.createForUpdate()).thenReturn(pool); + Mockito.doNothing().when(upgrade41700to41710).updateStorageTypeForStorPoolVolumes(Mockito.any()); + + Mockito.when(storageDao.update(Mockito.any(StoragePoolVO.class), Mockito.any())).thenReturn(2); + Mockito.when(storageDao.customSearch(Mockito.any(), Mockito.any())).thenReturn(List.of(1L, 2L)); + upgrade41700to41710.updateStorPoolStorageType(); + Mockito.verify(storageDao, Mockito.times(1)).update(Mockito.any(StoragePoolVO.class), Mockito.any()); + Mockito.verify(upgrade41700to41710, Mockito.times(1)).updateStorageTypeForStorPoolVolumes(Mockito.any()); + } + + @Test + public void testUpdateStorageTypeForStorPoolVolumes_EmptyPoolIds() { + VolumeDao volumeDao = Mockito.mock(VolumeDao.class); + List storagePoolIds = Collections.emptyList(); + 
upgrade41700to41710.updateStorageTypeForStorPoolVolumes(storagePoolIds); + Mockito.verify(volumeDao, Mockito.never()).update(Mockito.any(VolumeVO.class), Mockito.any()); + } + + @Test + public void testUpdateStorageTypeForStorPoolVolumes_WithPoolIds() { + VolumeDao volumeDao = Mockito.mock(VolumeDao.class); + List storagePoolIds = List.of(1L, 2L, 3L); + VolumeVO volume = Mockito.mock(VolumeVO.class); + SearchBuilder searchBuilder = Mockito.mock(SearchBuilder.class); + SearchCriteria searchCriteria = Mockito.mock(SearchCriteria.class); + Mockito.when(volumeDao.createForUpdate()).thenReturn(volume); + Mockito.when(volumeDao.createSearchBuilder()).thenReturn(searchBuilder); + Mockito.when(searchBuilder.entity()).thenReturn(volume); + Mockito.when(searchBuilder.create()).thenReturn(searchCriteria); + Mockito.when(volumeDao.update(Mockito.any(VolumeVO.class), Mockito.any())).thenReturn(3); + Mockito.doReturn(volumeDao).when(upgrade41700to41710).getVolumeDao(); + upgrade41700to41710.updateStorageTypeForStorPoolVolumes(storagePoolIds); + Mockito.verify(volumeDao).createForUpdate(); + Mockito.verify(volume).setPoolType(Storage.StoragePoolType.StorPool); + Mockito.verify(volumeDao).update(Mockito.eq(volume), Mockito.eq(searchCriteria)); + Mockito.verify(searchCriteria).setParameters("poolId", storagePoolIds.toArray()); + } +} diff --git a/engine/schema/src/test/java/com/cloud/vm/dao/VMInstanceDaoImplTest.java b/engine/schema/src/test/java/com/cloud/vm/dao/VMInstanceDaoImplTest.java index 43679081550..5f8b2dd90ec 100644 --- a/engine/schema/src/test/java/com/cloud/vm/dao/VMInstanceDaoImplTest.java +++ b/engine/schema/src/test/java/com/cloud/vm/dao/VMInstanceDaoImplTest.java @@ -34,10 +34,13 @@ import java.util.ArrayList; import java.util.Calendar; import java.util.Date; +import com.cloud.host.dao.HostDao; import org.joda.time.DateTime; import org.junit.After; import org.junit.Before; import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; 
import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.MockitoAnnotations; @@ -49,19 +52,24 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; +import org.mockito.junit.MockitoJUnitRunner; /** * Created by sudharma_jain on 3/2/17. */ - +@RunWith(MockitoJUnitRunner.class) public class VMInstanceDaoImplTest { + @InjectMocks @Spy - VMInstanceDaoImpl vmInstanceDao = new VMInstanceDaoImpl(); + VMInstanceDaoImpl vmInstanceDao; @Mock VMInstanceVO vm; + @Mock + HostDao _hostDao; + private AutoCloseable closeable; @Before @@ -111,9 +119,6 @@ public class VMInstanceDaoImplTest { @Test public void testUpdatePowerStateVmNotFound() { - when(vm.getPowerStateUpdateTime()).thenReturn(null); - when(vm.getPowerHostId()).thenReturn(1L); - when(vm.getPowerState()).thenReturn(VirtualMachine.PowerState.PowerOn); doReturn(null).when(vmInstanceDao).findById(anyLong()); boolean result = vmInstanceDao.updatePowerState(1L, 1L, VirtualMachine.PowerState.PowerOff, new Date()); @@ -154,7 +159,6 @@ public class VMInstanceDaoImplTest { when(vm.getPowerStateUpdateCount()).thenReturn(MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT); when(vm.getState()).thenReturn(Running); doReturn(vm).when(vmInstanceDao).findById(anyLong()); - doReturn(true).when(vmInstanceDao).update(anyLong(), any()); boolean result = vmInstanceDao.updatePowerState(1L, 1L, VirtualMachine.PowerState.PowerOn, new Date()); @@ -170,8 +174,8 @@ public class VMInstanceDaoImplTest { public void testUpdatePowerStateNoChangeMaxUpdatesInvalidStateVmStopped() { when(vm.getPowerStateUpdateTime()).thenReturn(null); when(vm.getPowerHostId()).thenReturn(1L); + when(vm.getHostId()).thenReturn(1L); when(vm.getPowerState()).thenReturn(VirtualMachine.PowerState.PowerOn); - when(vm.getPowerStateUpdateCount()).thenReturn(MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT); when(vm.getState()).thenReturn(Stopped); 
doReturn(vm).when(vmInstanceDao).findById(anyLong()); doReturn(true).when(vmInstanceDao).update(anyLong(), any()); @@ -190,8 +194,8 @@ public class VMInstanceDaoImplTest { public void testUpdatePowerStateNoChangeMaxUpdatesInvalidStateVmRunning() { when(vm.getPowerStateUpdateTime()).thenReturn(null); when(vm.getPowerHostId()).thenReturn(1L); + when(vm.getHostId()).thenReturn(1L); when(vm.getPowerState()).thenReturn(VirtualMachine.PowerState.PowerOff); - when(vm.getPowerStateUpdateCount()).thenReturn(MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT); when(vm.getState()).thenReturn(Running); doReturn(vm).when(vmInstanceDao).findById(anyLong()); doReturn(true).when(vmInstanceDao).update(anyLong(), any()); diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java index bf8fa43fe6c..1212bc66fd7 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java @@ -210,7 +210,7 @@ public class KvmNonManagedStorageDataMotionStrategy extends StorageSystemDataMot TemplateInfo directDownloadTemplateInfo = templateDataFactory.getReadyBypassedTemplateOnPrimaryStore(srcVolumeInfo.getTemplateId(), destDataStore.getId(), destHost.getId()); if (directDownloadTemplateInfo != null) { - logger.debug(String.format("Template %s was of direct download type and successfully staged to primary store %s", directDownloadTemplateInfo.getId(), directDownloadTemplateInfo.getDataStore().getId())); + logger.debug("Template {} was of direct download type and successfully staged to primary store {}", directDownloadTemplateInfo.getImage(), directDownloadTemplateInfo.getDataStore()); return; } @@ -221,8 +221,8 @@ public 
class KvmNonManagedStorageDataMotionStrategy extends StorageSystemDataMot TemplateInfo sourceTemplateInfo = templateDataFactory.getTemplate(srcVolumeInfo.getTemplateId(), sourceTemplateDataStore); TemplateObjectTO sourceTemplate = new TemplateObjectTO(sourceTemplateInfo); - logger.debug(String.format("Could not find template [id=%s, name=%s] on the storage pool [id=%s]; copying the template to the target storage pool.", - srcVolumeInfo.getTemplateId(), sourceTemplateInfo.getName(), destDataStore.getId())); + logger.debug("Could not find template [id={}, uuid={}, name={}] on the storage pool [{}]; copying the template to the target storage pool.", + srcVolumeInfo.getTemplateId(), sourceTemplateInfo.getUuid(), sourceTemplateInfo.getName(), destDataStore); TemplateInfo destTemplateInfo = templateDataFactory.getTemplate(srcVolumeInfo.getTemplateId(), destDataStore); final TemplateObjectTO destTemplate = new TemplateObjectTO(destTemplateInfo); @@ -234,7 +234,8 @@ public class KvmNonManagedStorageDataMotionStrategy extends StorageSystemDataMot return; } } - logger.debug(String.format("Skipping 'copy template to target filesystem storage before migration' due to the template [%s] already exist on the storage pool [%s].", srcVolumeInfo.getTemplateId(), destStoragePool.getId())); + logger.debug("Skipping 'copy template to target filesystem storage before migration' due to the template [{}] already exist on the storage pool [{}].", + srcVolumeInfo.getTemplateId(), destStoragePool); } /** @@ -267,8 +268,7 @@ public class KvmNonManagedStorageDataMotionStrategy extends StorageSystemDataMot } private String generateFailToCopyTemplateMessage(TemplateObjectTO sourceTemplate, DataStore destDataStore) { - return String.format("Failed to copy template [id=%s, name=%s] to the primary storage pool [id=%s].", sourceTemplate.getId(), - sourceTemplate.getName(), destDataStore.getId()); + return String.format("Failed to copy template [%s] to the primary storage pool [%s].", sourceTemplate, 
destDataStore); } /** diff --git a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java index b7468195f5d..808c319b40f 100644 --- a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java +++ b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java @@ -303,7 +303,6 @@ public class KvmNonManagedStorageSystemDataMotionTest { Mockito.lenient().when(dataStoreVO.getId()).thenReturn(0l); ImageStoreEntity destDataStore = Mockito.mock(ImageStoreImpl.class); - Mockito.doReturn(0l).when(destDataStore).getId(); Answer copyCommandAnswer = Mockito.mock(Answer.class); diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java index 5109118fb54..c6430bcf9f9 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java @@ -123,9 +123,9 @@ public class TemplateDataFactoryImpl implements TemplateDataFactory { if (logger.isDebugEnabled()) { if (!found) { - logger.debug("template " + templateId + " is not in store:" + store.getId() + ", type:" + store.getRole()); + logger.debug("template {} with id {} is not in store: {}, type: {}", templ, templateId, store, store.getRole()); } else { - logger.debug("template " + templateId + " is already in store:" + store.getId() + ", type:" + store.getRole()); + logger.debug("template {} with id {} is already in store:{}, type: {}", templ, templateId, store, store.getRole()); } } @@ -242,7 +242,7 @@ public class 
TemplateDataFactoryImpl implements TemplateDataFactory { HostVO host = hostDao.findById(hostId); List pools = getStoragePoolsForScope(host.getDataCenterId(), host.getClusterId(), hostId, host.getHypervisorType()); if (CollectionUtils.isEmpty(pools)) { - throw new CloudRuntimeException(String.format("No storage pool found to download template: %s", templateVO.getName())); + throw new CloudRuntimeException(String.format("No storage pool found to download template: %s", templateVO)); } List existingRefs = templatePoolDao.listByTemplateId(templateVO.getId()); return getOneMatchingPoolIdFromRefs(existingRefs, pools); @@ -274,7 +274,7 @@ public class TemplateDataFactoryImpl implements TemplateDataFactory { } if (poolId == null) { - throw new CloudRuntimeException("No storage pool specified to download template: " + templateId); + throw new CloudRuntimeException(String.format("No storage pool specified to download template: %s", templateVO)); } StoragePoolVO poolVO = primaryDataStoreDao.findById(poolId); @@ -284,7 +284,7 @@ public class TemplateDataFactoryImpl implements TemplateDataFactory { VMTemplateStoragePoolVO spoolRef = templatePoolDao.findByPoolTemplate(poolId, templateId, null); if (spoolRef == null) { - throw new CloudRuntimeException("Template not created on managed storage pool: " + poolId + " to copy the download template: " + templateId); + throw new CloudRuntimeException(String.format("Template not created on managed storage pool: %s to copy the download template: %s", poolVO, templateVO)); } else if (spoolRef.getDownloadState() == VMTemplateStorageResourceAssoc.Status.NOT_DOWNLOADED) { directDownloadManager.downloadTemplate(templateId, poolId, hostId); } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java index abc955c2e49..38e0d0d081c 100644 --- 
a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java @@ -280,7 +280,7 @@ public class TemplateServiceImpl implements TemplateService { TemplateDataStoreVO tmpltHost = _vmTemplateStoreDao.findByStoreTemplate(store.getId(), template.getId()); if (tmpltHost == null) { associateTemplateToZone(template.getId(), dcId); - logger.info("Downloading builtin template " + template.getUniqueName() + " to data center: " + dcId); + logger.info("Downloading builtin template {} to data center: {}", template, dcId); TemplateInfo tmplt = _templateFactory.getTemplate(template.getId(), DataStoreRole.Image); createTemplateAsync(tmplt, store, null); } @@ -299,7 +299,7 @@ public class TemplateServiceImpl implements TemplateService { return false; } if (zoneId != null && _vmTemplateStoreDao.findByTemplateZone(template.getId(), zoneId, DataStoreRole.Image) == null) { - logger.debug(String.format("Template %s is not present on any image store for the zone ID: %d, its download cannot be skipped", template.getUniqueName(), zoneId)); + logger.debug("Template {} is not present on any image store for the zone ID: {}, its download cannot be skipped", template, zoneId); return false; } return true; @@ -376,29 +376,29 @@ public class TemplateServiceImpl implements TemplateService { TemplateProp tmpltInfo = templateInfos.remove(uniqueName); toBeDownloaded.remove(tmplt); if (tmpltStore != null) { - logger.info("Template Sync found " + uniqueName + " already in the image store"); + logger.info("Template Sync found {} already in the image store", tmplt); if (tmpltStore.getDownloadState() != Status.DOWNLOADED) { tmpltStore.setErrorString(""); } if (tmpltInfo.isCorrupted()) { tmpltStore.setDownloadState(Status.DOWNLOAD_ERROR); - String msg = "Template " + tmplt.getName() + ":" + tmplt.getId() + " is corrupted on secondary storage " + tmpltStore.getId(); + 
String msg = String.format("Template %s is corrupted on secondary storage %s", tmplt, store); tmpltStore.setErrorString(msg); logger.info(msg); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_UPLOAD_FAILED, zoneId, null, msg, msg); if (tmplt.getState() == VirtualMachineTemplate.State.NotUploaded || tmplt.getState() == VirtualMachineTemplate.State.UploadInProgress) { - logger.info("Template Sync found " + uniqueName + " on image store " + storeId + " uploaded using SSVM as corrupted, marking it as failed"); + logger.info("Template Sync found {} on image store {} uploaded using SSVM as corrupted, marking it as failed", tmplt, store); tmpltStore.setState(State.Failed); try { stateMachine.transitTo(tmplt, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao); } catch (NoTransitionException e) { - logger.error("Unexpected state transition exception for template " + tmplt.getName() + ". Details: " + e.getMessage()); + logger.error("Unexpected state transition exception for template {}. Details: {}", tmplt, e.getMessage()); } } else if (tmplt.getUrl() == null) { - msg = "Private template (" + tmplt + ") with install path " + tmpltInfo.getInstallPath() + " is corrupted, please check in image store: " + tmpltStore.getDataStoreId(); + msg = String.format("Private template (%s) with install path %s is corrupted, please check in image store: %s", tmplt, tmpltInfo.getInstallPath(), store); logger.warn(msg); } else { - logger.info("Removing template_store_ref entry for corrupted template " + tmplt.getName()); + logger.info("Removing template_store_ref entry for corrupted template {}", tmplt); _vmTemplateStoreDao.remove(tmpltStore.getId()); toBeDownloaded.add(tmplt); } @@ -438,7 +438,7 @@ public class TemplateServiceImpl implements TemplateService { try { stateMachine.transitTo(tmplt, event, null, _templateDao); } catch (NoTransitionException e) { - logger.error("Unexpected state transition exception for template " + tmplt.getName() + ". 
Details: " + e.getMessage()); + logger.error("Unexpected state transition exception for template {}. Details: {}", tmplt, e.getMessage()); } } @@ -483,30 +483,30 @@ public class TemplateServiceImpl implements TemplateService { tmpltInfo.getPhysicalSize(), tmpltInfo.getSize(), VirtualMachineTemplate.class.getName(), tmplt.getUuid()); } } else if (tmplt.getState() == VirtualMachineTemplate.State.NotUploaded || tmplt.getState() == VirtualMachineTemplate.State.UploadInProgress) { - logger.info("Template Sync did not find " + uniqueName + " on image store " + storeId + " uploaded using SSVM, marking it as failed"); + logger.info("Template Sync did not find {} on image store {} uploaded using SSVM, marking it as failed", tmplt, store); toBeDownloaded.remove(tmplt); tmpltStore.setDownloadState(Status.DOWNLOAD_ERROR); - String msg = "Template " + tmplt.getName() + ":" + tmplt.getId() + " is corrupted on secondary storage " + tmpltStore.getId(); + String msg = String.format("Template %s is corrupted on secondary storage %s", tmplt, store); tmpltStore.setErrorString(msg); tmpltStore.setState(State.Failed); _vmTemplateStoreDao.update(tmpltStore.getId(), tmpltStore); try { stateMachine.transitTo(tmplt, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao); } catch (NoTransitionException e) { - logger.error("Unexpected state transition exception for template " + tmplt.getName() + ". Details: " + e.getMessage()); + logger.error("Unexpected state transition exception for template {}. 
Details: {}", tmplt, e.getMessage()); } } else if (tmplt.isDirectDownload()) { - logger.info("Template " + tmplt.getName() + ":" + tmplt.getId() + " is marked for direct download, discarding it for download on image stores"); + logger.info("Template {} is marked for direct download, discarding it for download on image stores", tmplt); toBeDownloaded.remove(tmplt); } else { - logger.info("Template Sync did not find " + uniqueName + " on image store " + storeId + ", may request download based on available hypervisor types"); + logger.info("Template Sync did not find {} on image store {}, may request download based on available hypervisor types", tmplt, store); if (tmpltStore != null) { if (_storeMgr.isRegionStore(store) && tmpltStore.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOADED && tmpltStore.getState() == State.Ready && tmpltStore.getInstallPath() == null) { logger.info("Keep fake entry in template store table for migration of previous NFS to object store"); } else { - logger.info("Removing leftover template " + uniqueName + " entry from template store table"); + logger.info("Removing leftover template {} entry from template store table", tmplt); // remove those leftover entries _vmTemplateStoreDao.remove(tmpltStore.getId()); } @@ -530,12 +530,12 @@ public class TemplateServiceImpl implements TemplateService { // download. 
for (VMTemplateVO tmplt : toBeDownloaded) { if (tmplt.getUrl() == null) { // If url is null, skip downloading - logger.info("Skip downloading template " + tmplt.getUniqueName() + " since no url is specified."); + logger.info("Skip downloading template {} since no url is specified.", tmplt); continue; } // if this is private template, skip sync to a new image store if (isSkipTemplateStoreDownload(tmplt, zoneId)) { - logger.info("Skip sync downloading private template " + tmplt.getUniqueName() + " to a new image store"); + logger.info("Skip sync downloading private template {} to a new image store", tmplt); continue; } @@ -551,7 +551,7 @@ public class TemplateServiceImpl implements TemplateService { } if (availHypers.contains(tmplt.getHypervisorType())) { - logger.info("Downloading template " + tmplt.getUniqueName() + " to image store " + store.getName()); + logger.info("Downloading template {} to image store {}", tmplt, store); associateTemplateToZone(tmplt.getId(), zoneId); TemplateInfo tmpl = _templateFactory.getTemplate(tmplt.getId(), store); TemplateOpContext context = new TemplateOpContext<>(null,(TemplateObject)tmpl, null); @@ -560,8 +560,7 @@ public class TemplateServiceImpl implements TemplateService { caller.setContext(context); createTemplateAsync(tmpl, store, caller); } else { - logger.info("Skip downloading template " + tmplt.getUniqueName() + " since current data center does not have hypervisor " + - tmplt.getHypervisorType().toString()); + logger.info("Skip downloading template {} since current data center does not have hypervisor {}", tmplt, tmplt.getHypervisorType()); } } } @@ -585,10 +584,10 @@ public class TemplateServiceImpl implements TemplateService { answer = ep.sendMessage(dtCommand); } if (answer == null || !answer.getResult()) { - logger.info("Failed to deleted template at store: " + store.getName()); + logger.info("Failed to deleted template at store: {}", store); } else { - String description = "Deleted template " + tInfo.getTemplateName() 
+ " on secondary storage " + storeId; + String description = String.format("Deleted template %s on secondary storage %s", tInfo.getTemplateName(), store); logger.info(description); } @@ -598,7 +597,7 @@ public class TemplateServiceImpl implements TemplateService { syncLock.unlock(); } } else { - logger.info("Couldn't get global lock on " + lockString + ", another thread may be doing template sync on data store " + storeId + " now."); + logger.info("Couldn't get global lock on {}, another thread may be doing template sync on data store {} now.", lockString, store); } } finally { syncLock.releaseRef(); @@ -673,15 +672,14 @@ public class TemplateServiceImpl implements TemplateService { if (tmpltStore != null) { physicalSize = tmpltStore.getPhysicalSize(); } else { - logger.warn("No entry found in template_store_ref for template id: " + template.getId() + " and image store id: " + ds.getId() + - " at the end of registering template!"); + logger.warn("No entry found in template_store_ref for template: {} and image store: {} at the end of registering template!", template, ds); } Scope dsScope = ds.getScope(); if (dsScope.getScopeId() != null) { UsageEventUtils.publishUsageEvent(etype, template.getAccountId(), dsScope.getScopeId(), template.getId(), template.getName(), null, null, physicalSize, template.getSize(), VirtualMachineTemplate.class.getName(), template.getUuid()); } else { - logger.warn("Zone scope image store " + ds.getId() + " has a null scope id"); + logger.warn("Zone scope image store {} has a null scope id", ds); } _resourceLimitMgr.incrementResourceCount(accountId, Resource.ResourceType.secondary_storage, template.getSize()); } @@ -707,7 +705,7 @@ public class TemplateServiceImpl implements TemplateService { return tanswer.getTemplateInfo(); } else { if (logger.isDebugEnabled()) { - logger.debug("can not list template for secondary storage host " + ssStore.getId()); + logger.debug("can not list template for secondary storage host {}", ssStore); } } @@ 
-844,8 +842,7 @@ public class TemplateServiceImpl implements TemplateService { _resourceLimitMgr.incrementResourceCount(template.getAccountId(), ResourceType.secondary_storage, templateVO.getSize()); } else { // Delete the Datadisk templates that were already created as they are now invalid - logger.debug("Since creation of Datadisk template: " + templateVO.getId() + " failed, delete other Datadisk templates that were created as part of parent" - + " template download"); + logger.debug("Since creation of Datadisk template: {} failed, delete other Datadisk templates that were created as part of parent template download", templateVO); TemplateInfo parentTemplateInfo = imageFactory.getTemplate(templateVO.getParentTemplateId(), imageStore); cleanupDatadiskTemplates(parentTemplateInfo); } @@ -859,8 +856,7 @@ public class TemplateServiceImpl implements TemplateService { TemplateApiResult result = null; result = templateFuture.get(); if (!result.isSuccess()) { - logger.debug("Since creation of parent template: " + templateInfo.getId() + " failed, delete Datadisk templates that were created as part of parent" - + " template download"); + logger.debug("Since creation of parent template: {} failed, delete Datadisk templates that were created as part of parent template download", templateInfo); cleanupDatadiskTemplates(templateInfo); } return result.isSuccess(); @@ -909,12 +905,12 @@ public class TemplateServiceImpl implements TemplateService { DataStore imageStore = parentTemplateInfo.getDataStore(); List datadiskTemplatesToDelete = _templateDao.listByParentTemplatetId(parentTemplateInfo.getId()); for (VMTemplateVO datadiskTemplateToDelete: datadiskTemplatesToDelete) { - logger.info("Delete template: " + datadiskTemplateToDelete.getId() + " from image store: " + imageStore.getName()); + logger.info("Delete template: {} from image store: {}", datadiskTemplateToDelete, imageStore); AsyncCallFuture future = 
deleteTemplateAsync(imageFactory.getTemplate(datadiskTemplateToDelete.getId(), imageStore)); try { TemplateApiResult result = future.get(); if (!result.isSuccess()) { - logger.warn("Failed to delete datadisk template: " + datadiskTemplateToDelete + " from image store: " + imageStore.getName() + " due to: " + result.getResult()); + logger.warn("Failed to delete datadisk template: {} from image store: {} due to: {}", datadiskTemplateToDelete, imageStore, result.getResult()); break; } _vmTemplateZoneDao.deletePrimaryRecordsForTemplate(datadiskTemplateToDelete.getId()); @@ -1027,33 +1023,32 @@ public class TemplateServiceImpl implements TemplateService { // This routine is used to push templates currently on cache store, but not in region store to region store. // used in migrating existing NFS secondary storage to S3. @Override - public void syncTemplateToRegionStore(long templateId, DataStore store) { + public void syncTemplateToRegionStore(VirtualMachineTemplate template, DataStore store) { if (_storeMgr.isRegionStore(store)) { if (logger.isDebugEnabled()) { - logger.debug("Sync template " + templateId + " from cache to object store..."); + logger.debug("Sync template {} from cache to object store...", template); } // if template is on region wide object store, check if it is really downloaded there (by checking install_path). Sync template to region // wide store if it is not there physically. 
- TemplateInfo tmplOnStore = _templateFactory.getTemplate(templateId, store); + TemplateInfo tmplOnStore = _templateFactory.getTemplate(template.getId(), store); if (tmplOnStore == null) { - throw new CloudRuntimeException("Cannot find an entry in template_store_ref for template " + templateId + " on region store: " + store.getName()); + throw new CloudRuntimeException(String.format("Cannot find an entry in template_store_ref for template %s on region store: %s", template, store)); } if (tmplOnStore.getInstallPath() == null || tmplOnStore.getInstallPath().length() == 0) { // template is not on region store yet, sync to region store - TemplateInfo srcTemplate = _templateFactory.getReadyTemplateOnCache(templateId); + TemplateInfo srcTemplate = _templateFactory.getReadyTemplateOnCache(template.getId()); if (srcTemplate == null) { - throw new CloudRuntimeException("Cannot find template " + templateId + " on cache store"); + throw new CloudRuntimeException(String.format("Cannot find template %s on cache store", tmplOnStore)); } AsyncCallFuture future = syncToRegionStoreAsync(srcTemplate, store); try { TemplateApiResult result = future.get(); if (result.isFailed()) { - throw new CloudRuntimeException("sync template from cache to region wide store failed for image store " + store.getName() + ":" + - result.getResult()); + throw new CloudRuntimeException(String.format("sync template from cache to region wide store failed for image store %s: %s", store, result.getResult())); } _cacheMgr.releaseCacheObject(srcTemplate); // reduce reference count for template on cache, so it can recycled by schedule } catch (Exception ex) { - throw new CloudRuntimeException("sync template from cache to region wide store failed for image store " + store.getName()); + throw new CloudRuntimeException(String.format("sync template from cache to region wide store failed for image store %s", store)); } } } @@ -1071,8 +1066,7 @@ public class TemplateServiceImpl implements TemplateService { // 
generate a URL from source template ssvm to download to destination data store String url = generateCopyUrl(srcTemplate); if (url == null) { - logger.warn("Unable to start/resume copy of template " + srcTemplate.getUniqueName() + " to " + destStore.getName() + - ", no secondary storage vm in running state in source zone"); + logger.warn("Unable to start/resume copy of template {} to {}, no secondary storage vm in running state in source zone", srcTemplate, destStore); throw new CloudRuntimeException("No secondary VM in running state in source template zone "); } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java index d59f6d4c54d..14db5ea5771 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java @@ -228,4 +228,9 @@ public class ImageStoreImpl implements ImageStoreEntity { return driver.createDataDiskTemplateAsync(dataDiskTemplate, path, diskId, bootable, fileSize, callback); } + @Override + public String toString() { + return imageDataStoreVO.toString(); + } + } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java index b7d83c70223..a3b7d0c9ecc 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java @@ -26,6 +26,7 @@ import javax.inject.Inject; import com.cloud.cpu.CPU; import com.cloud.storage.StorageManager; import com.cloud.user.UserData; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import 
org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -102,6 +103,7 @@ public class TemplateObject implements TemplateInfo { imageVO.setSize(size); } + @Override public VMTemplateVO getImage() { if (imageVO == null) { String msg = String.format("Template Object is not properly initialised %s", this.toString()); @@ -596,4 +598,11 @@ public class TemplateObject implements TemplateInfo { public boolean isFollowRedirects() { return followRedirects; } + + @Override + public String toString() { + return String.format("TemplateObject %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "imageVO", "dataStore")); + } } diff --git a/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java b/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java index f1c27526f52..a96d87ada04 100644 --- a/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java +++ b/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java @@ -30,6 +30,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.storage.datastore.db.ObjectStoreVO; import org.apache.cloudstack.storage.object.ObjectStoreDriver; import org.apache.cloudstack.storage.object.ObjectStoreEntity; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import java.util.Date; import java.util.List; @@ -57,6 +58,13 @@ public class ObjectStoreImpl implements ObjectStoreEntity { return instance; } + @Override + public String toString() { + return String.format("ObjectStoreImpl %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "objectStoreVO", "provider")); + } + @Override public DataStoreDriver getDriver() { return this.driver; diff --git 
a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java index afc8be1e5f9..f5cfaf07274 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java @@ -136,7 +136,7 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase { try { snapObj.processEvent(Snapshot.Event.OperationNotPerformed); } catch (NoTransitionException e) { - logger.debug("Failed to change state: " + snapshot.getId() + ": " + e.toString()); + logger.debug("Failed to change state of the snapshot {}, due to {}", snapshot, e); throw new CloudRuntimeException(e.toString()); } return snapshotDataFactory.getSnapshot(snapObj.getId(), store); @@ -231,7 +231,7 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase { if (r) { List cacheSnaps = snapshotDataFactory.listSnapshotOnCache(snapshot.getId()); for (SnapshotInfo cacheSnap : cacheSnaps) { - logger.debug(String.format("Deleting snapshot %s from image cache [%s].", snapshotTo, cacheSnap.getDataStore().getName())); + logger.debug("Deleting snapshot {} from image cache [{}].", snapshotTo, cacheSnap.getDataStore()); cacheSnap.delete(); } } @@ -297,7 +297,7 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase { if (!Snapshot.State.BackedUp.equals(snapshotVO.getState()) && !Snapshot.State.Destroying.equals(snapshotVO.getState())) { - throw new InvalidParameterValueException("Can't delete snapshotshot " + snapshotId + " due to it is in " + snapshotVO.getState() + " Status"); + throw new InvalidParameterValueException(String.format("Can't delete snapshot %s due to it is in %s Status", snapshotVO, snapshotVO.getState())); } return destroySnapshotEntriesAndFiles(snapshotVO, zoneId); @@ -442,7 
+442,7 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase { SnapshotVO snapshotVO = snapshotDao.acquireInLockTable(snapshot.getId()); if (snapshotVO == null) { - throw new CloudRuntimeException("Failed to get lock on snapshot:" + snapshot.getId()); + throw new CloudRuntimeException(String.format("Failed to get lock on snapshot: %s", snapshot)); } try { @@ -463,9 +463,9 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase { result = snapshotSvr.revertSnapshot(snapshot); if (!result) { - logger.debug("Failed to revert snapshot: " + snapshot.getId()); + logger.debug("Failed to revert snapshot: {}", snapshot); - throw new CloudRuntimeException("Failed to revert snapshot: " + snapshot.getId()); + throw new CloudRuntimeException(String.format("Failed to revert snapshot: %s", snapshot)); } } finally { if (result) { @@ -498,7 +498,7 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase { SnapshotVO snapshotVO = snapshotDao.acquireInLockTable(snapshot.getId()); if (snapshotVO == null) { - throw new CloudRuntimeException("Failed to get lock on snapshot:" + snapshot.getId()); + throw new CloudRuntimeException(String.format("Failed to get lock on snapshot: %s", snapshot)); } try { diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java index fc5e61ef710..4d8919ccc48 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java @@ -57,7 +57,7 @@ public class SnapshotDataFactoryImpl implements SnapshotDataFactory { public SnapshotInfo getSnapshot(DataObject obj, DataStore store) { SnapshotVO snapshot = snapshotDao.findById(obj.getId()); if (snapshot == null) { - throw new 
CloudRuntimeException("Can't find snapshot: " + obj.getId()); + throw new CloudRuntimeException("Can't find snapshot: " + obj); } SnapshotObject so = SnapshotObject.getSnapshotObject(snapshot, store); return so; diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java index 961a647d7a8..a3964bd461e 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java @@ -41,6 +41,7 @@ import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -184,8 +185,7 @@ public class SnapshotObject implements SnapshotInfo { processEvent(Event.OperationNotPerformed); } catch (NoTransitionException ex) { logger.error("no transition error: ", ex); - throw new CloudRuntimeException("Error marking snapshot backed up: " + - this.snapshot.getId() + " " + ex.getMessage()); + throw new CloudRuntimeException(String.format("Error marking snapshot backed up: %s %s", this.snapshot, ex.getMessage())); } } @@ -370,12 +370,11 @@ public class SnapshotObject implements SnapshotInfo { if (snapshotTO.getVolume() != null && snapshotTO.getVolume().getPath() != null) { VolumeVO vol = volumeDao.findByUuid(snapshotTO.getVolume().getUuid()); if (vol != null) { - logger.info("Update volume path change due to snapshot operation, volume " + vol.getId() + " path: " + vol.getPath() + "->" + - 
snapshotTO.getVolume().getPath()); + logger.info("Update volume path change due to snapshot operation, volume {} path: {}->{}", vol, vol.getPath(), snapshotTO.getVolume().getPath()); vol.setPath(snapshotTO.getVolume().getPath()); volumeDao.update(vol.getId(), vol); } else { - logger.error("Cound't find the original volume with uuid: " + snapshotTO.getVolume().getUuid()); + logger.error("Couldn't find the original volume: {}", snapshotTO.getVolume()); } } } else { @@ -466,4 +465,11 @@ public class SnapshotObject implements SnapshotInfo { public Class getEntityType() { return Snapshot.class; } + + @Override + public String toString() { + return String.format("SnapshotObject %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "snapshot", "store")); + } } diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java index dafc40e0674..2173aba3f05 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java @@ -198,7 +198,7 @@ public class SnapshotServiceImpl implements SnapshotService { AsyncCallFuture future = context.future; SnapshotResult snapResult = new SnapshotResult(snapshot, result.getAnswer()); if (result.isFailed()) { - logger.debug("create snapshot " + context.snapshot.getName() + " failed: " + result.getResult()); + logger.debug("create snapshot {} failed: {}", context.snapshot, result.getResult()); try { snapshot.processEvent(Snapshot.Event.OperationFailed); snapshot.processEvent(Event.OperationFailed); @@ -267,14 +267,14 @@ public class SnapshotServiceImpl implements SnapshotService { PrimaryDataStoreDriver primaryStore = (PrimaryDataStoreDriver)snapshotOnPrimary.getDataStore().getDriver(); 
primaryStore.takeSnapshot(snapshot, caller); } catch (Exception e) { - logger.debug("Failed to take snapshot: " + snapshot.getId(), e); + logger.debug("Failed to take snapshot: {}", snapshot, e); try { snapshot.processEvent(Snapshot.Event.OperationFailed); snapshot.processEvent(Event.OperationFailed); } catch (NoTransitionException e1) { logger.debug("Failed to change state for event: OperationFailed", e); } - throw new CloudRuntimeException("Failed to take snapshot" + snapshot.getId()); + throw new CloudRuntimeException(String.format("Failed to take snapshot %s", snapshot)); } SnapshotResult result; @@ -407,7 +407,7 @@ public class SnapshotServiceImpl implements SnapshotService { if (createSnapshotPayload.getAsyncBackup()) { _snapshotDao.remove(srcSnapshot.getId()); destSnapshot.processEvent(Event.OperationFailed); - throw new SnapshotBackupException("Failed in creating backup of snapshot with ID "+srcSnapshot.getId()); + throw new SnapshotBackupException(String.format("Failed in creating backup of snapshot %s", srcSnapshot)); } else { destSnapshot.processEvent(Event.OperationFailed); //if backup snapshot failed, mark srcSnapshot in snapshot_store_ref as failed also @@ -486,7 +486,7 @@ public class SnapshotServiceImpl implements SnapshotService { SnapshotResult res = null; try { if (result.isFailed()) { - logger.debug(String.format("Failed to delete snapshot [%s] due to: [%s].", snapshot.getUuid(), result.getResult())); + logger.debug("Failed to delete snapshot [{}] due to: [{}].", snapshot, result.getResult()); snapshot.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed); res = new SnapshotResult(context.snapshot, null); res.setResult(result.getResult()); @@ -495,8 +495,8 @@ public class SnapshotServiceImpl implements SnapshotService { res = new SnapshotResult(context.snapshot, null); } } catch (Exception e) { - logger.error(String.format("An exception occurred while processing an event in delete snapshot callback from snapshot [%s].", 
snapshot.getUuid())); - logger.debug(String.format("Exception while processing an event in delete snapshot callback from snapshot [%s].", snapshot.getUuid()), e); + logger.error("An exception occurred while processing an event in delete snapshot callback from snapshot [{}].", snapshot); + logger.debug("Exception while processing an event in delete snapshot callback from snapshot [{}].", snapshot, e); res.setResult(e.toString()); } future.complete(res); @@ -541,11 +541,11 @@ public class SnapshotServiceImpl implements SnapshotService { if (result.isFailed()) { throw new CloudRuntimeException(result.getResult()); } - logger.debug(String.format("Successfully deleted snapshot [%s] with ID [%s].", snapInfo.getName(), snapInfo.getUuid())); + logger.debug("Successfully deleted snapshot [{}].", snapInfo); return true; } catch (InterruptedException | ExecutionException e) { - logger.error(String.format("Failed to delete snapshot [%s] due to: [%s].", snapInfo.getUuid(), e.getMessage())); - logger.debug(String.format("Failed to delete snapshot [%s].", snapInfo.getUuid()), e); + logger.error("Failed to delete snapshot [{}] due to: [{}].", snapInfo, e.getMessage()); + logger.debug("Failed to delete snapshot [{}].", snapInfo, e); } return false; @@ -556,7 +556,7 @@ public class SnapshotServiceImpl implements SnapshotService { PrimaryDataStore store = null; SnapshotInfo snapshotOnPrimaryStore = _snapshotFactory.getSnapshotOnPrimaryStore(snapshot.getId()); if (snapshotOnPrimaryStore == null) { - logger.warn("Cannot find an entry for snapshot " + snapshot.getId() + " on primary storage pools, searching with volume's primary storage pool"); + logger.warn("Cannot find an entry for snapshot {} on primary storage pools, searching with volume's primary storage pool", snapshot); VolumeInfo volumeInfo = volFactory.getVolume(snapshot.getVolumeId(), DataStoreRole.Primary); store = (PrimaryDataStore)volumeInfo.getDataStore(); } else { @@ -595,7 +595,7 @@ public class SnapshotServiceImpl 
implements SnapshotService { List snapshots = _snapshotDao.listByStatus(volumeId, Snapshot.State.BackedUp); if (snapshots != null) { for (SnapshotVO snapshot : snapshots) { - syncSnapshotToRegionStore(snapshot.getId(), store); + syncSnapshotToRegionStore(snapshot, store); } } } @@ -603,53 +603,49 @@ public class SnapshotServiceImpl implements SnapshotService { @Override public void cleanupVolumeDuringSnapshotFailure(Long volumeId, Long snapshotId) { - SnapshotVO snaphsot = _snapshotDao.findById(snapshotId); + SnapshotVO snapshot = _snapshotDao.findById(snapshotId); - if (snaphsot != null) { - if (snaphsot.getState() != Snapshot.State.BackedUp) { + if (snapshot != null) { + if (snapshot.getState() != Snapshot.State.BackedUp) { List snapshotDataStoreVOs = _snapshotStoreDao.findBySnapshotId(snapshotId); for (SnapshotDataStoreVO snapshotDataStoreVO : snapshotDataStoreVOs) { - logger.debug("Remove snapshot " + snapshotId + ", status " + snapshotDataStoreVO.getState() + - " on snapshot_store_ref table with id: " + snapshotDataStoreVO.getId()); + logger.debug("Remove snapshot {}, status {} on snapshot_store_ref table with id: {}", snapshot, snapshotDataStoreVO.getState(), snapshotDataStoreVO.getId()); _snapshotStoreDao.remove(snapshotDataStoreVO.getId()); } - logger.debug("Remove snapshot " + snapshotId + " status " + snaphsot.getState() + " from snapshot table"); + logger.debug("Remove snapshot {} status {} from snapshot table", snapshot, snapshot.getState()); _snapshotDao.remove(snapshotId); } } - - } // push one individual snapshots currently on cache store to region store if it is not there already - private void syncSnapshotToRegionStore(long snapshotId, DataStore store){ + private void syncSnapshotToRegionStore(SnapshotVO snapshot, DataStore store){ // if snapshot is already on region wide object store, check if it is really downloaded there (by checking install_path). Sync snapshot to region // wide store if it is not there physically. 
- SnapshotInfo snapOnStore = _snapshotFactory.getSnapshot(snapshotId, store); + SnapshotInfo snapOnStore = _snapshotFactory.getSnapshot(snapshot.getId(), store); if (snapOnStore == null) { - throw new CloudRuntimeException("Cannot find an entry in snapshot_store_ref for snapshot " + snapshotId + " on region store: " + store.getName()); + throw new CloudRuntimeException(String.format("Cannot find an entry in snapshot_store_ref for snapshot %s on region store: %s", snapshot, store)); } if (snapOnStore.getPath() == null || snapOnStore.getPath().length() == 0) { if (logger.isDebugEnabled()) { - logger.debug("sync snapshot " + snapshotId + " from cache to object store..."); + logger.debug("sync snapshot {} from cache to object store...", snapshot); } // snapshot is not on region store yet, sync to region store - SnapshotInfo srcSnapshot = _snapshotFactory.getReadySnapshotOnCache(snapshotId); + SnapshotInfo srcSnapshot = _snapshotFactory.getReadySnapshotOnCache(snapshot.getId()); if (srcSnapshot == null) { - throw new CloudRuntimeException("Cannot find snapshot " + snapshotId + " on cache store"); + throw new CloudRuntimeException(String.format("Cannot find snapshot %s on cache store", snapshot)); } AsyncCallFuture future = syncToRegionStoreAsync(srcSnapshot, store); try { SnapshotResult result = future.get(); if (result.isFailed()) { - throw new CloudRuntimeException("sync snapshot from cache to region wide store failed for image store " + store.getName() + ":" - + result.getResult()); + throw new CloudRuntimeException(String.format("sync snapshot from cache to region wide store failed for image store %s: %s", store, result.getResult())); } _cacheMgr.releaseCacheObject(srcSnapshot); // reduce reference count for template on cache, so it can recycled by schedule } catch (Exception ex) { - throw new CloudRuntimeException("sync snapshot from cache to region wide store failed for image store " + store.getName()); + throw new CloudRuntimeException(String.format("sync 
snapshot from cache to region wide store failed for image store %s", store)); } } @@ -723,7 +719,7 @@ public class SnapshotServiceImpl implements SnapshotService { _snapshotDao.remove(srcSnapshot.getId()); } catch (NoTransitionException ex) { logger.debug("Failed to create backup " + ex.toString()); - throw new CloudRuntimeException("Failed to backup snapshot" + snapshot.getId()); + throw new CloudRuntimeException(String.format("Failed to backup snapshot %s", snapshot)); } } }); @@ -769,7 +765,7 @@ public class SnapshotServiceImpl implements SnapshotService { AsyncCallFuture future = new AsyncCallFuture<>(); EndPoint ep = epSelector.select(snapshot); if (ep == null) { - logger.error(String.format("Failed to find endpoint for generating copy URL for snapshot %d with store %d", snapshot.getId(), snapshot.getDataStore().getId())); + logger.error(String.format("Failed to find endpoint for generating copy URL for snapshot %s with store %s", snapshot.getSnapshotVO(), snapshot.getDataStore())); throw new ResourceUnavailableException("No secondary VM in running state in source snapshot zone", DataCenter.class, snapshot.getDataCenterId()); } DataStore store = snapshot.getDataStore(); diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java index d27beecfdda..1ec6e20fc9e 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java @@ -26,6 +26,7 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.storage.StoragePool; import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; import 
org.apache.cloudstack.engine.subsystem.api.storage.VMSnapshotStrategy; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -162,8 +163,7 @@ public class ScaleIOVMSnapshotStrategy extends ManagerBase implements VMSnapshot Map srcVolumeDestSnapshotMap = new HashMap<>(); List volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId()); - final Long storagePoolId = vmSnapshotHelper.getStoragePoolForVM(userVm.getId()); - StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + StoragePoolVO storagePool = vmSnapshotHelper.getStoragePoolForVM(userVm); long prev_chain_size = 0; long virtual_size=0; for (VolumeObjectTO volume : volumeTOs) { @@ -188,7 +188,7 @@ public class ScaleIOVMSnapshotStrategy extends ManagerBase implements VMSnapshot vmSnapshotVO.setParent(current.getId()); try { - final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + final ScaleIOGatewayClient client = getScaleIOClient(storagePool); SnapshotGroup snapshotGroup = client.takeSnapshot(srcVolumeDestSnapshotMap); if (snapshotGroup == null) { throw new CloudRuntimeException("Failed to take VM snapshot on PowerFlex storage pool"); @@ -291,7 +291,8 @@ public class ScaleIOVMSnapshotStrategy extends ManagerBase implements VMSnapshot boolean result = false; try { List volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId()); - Long storagePoolId = vmSnapshotHelper.getStoragePoolForVM(userVm.getId()); + StoragePoolVO storagePool = vmSnapshotHelper.getStoragePoolForVM(userVm); + Long storagePoolId = storagePool.getId(); Map srcSnapshotDestVolumeMap = new HashMap<>(); for (VolumeObjectTO volume : volumeTOs) { VMSnapshotDetailsVO vmSnapshotDetail = vmSnapshotDetailsDao.findDetail(vmSnapshotVO.getId(), "Vol_" + volume.getId() + "_Snapshot"); @@ -305,7 +306,7 @@ public class ScaleIOVMSnapshotStrategy extends ManagerBase implements VMSnapshot throw new CloudRuntimeException("Failed to get the system id for PowerFlex storage pool for reverting VM snapshot: " + 
vmSnapshot.getName()); } - final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + final ScaleIOGatewayClient client = getScaleIOClient(storagePool); result = client.revertSnapshot(systemId, srcSnapshotDestVolumeMap); if (!result) { throw new CloudRuntimeException("Failed to revert VM snapshot on PowerFlex storage pool"); @@ -314,7 +315,7 @@ public class ScaleIOVMSnapshotStrategy extends ManagerBase implements VMSnapshot finalizeRevert(vmSnapshotVO, volumeTOs); result = true; } catch (Exception e) { - String errMsg = "Revert VM: " + userVm.getInstanceName() + " to snapshot: " + vmSnapshotVO.getName() + " failed due to " + e.getMessage(); + String errMsg = String.format("Revert VM: %s to snapshot: %s failed due to %s", userVm, vmSnapshotVO, e.getMessage()); logger.error(errMsg, e); throw new CloudRuntimeException(errMsg); } finally { @@ -378,8 +379,8 @@ public class ScaleIOVMSnapshotStrategy extends ManagerBase implements VMSnapshot try { List volumeTOs = vmSnapshotHelper.getVolumeTOList(vmSnapshot.getVmId()); - Long storagePoolId = vmSnapshotHelper.getStoragePoolForVM(userVm.getId()); - String systemId = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID).getValue(); + StoragePoolVO storagePool = vmSnapshotHelper.getStoragePoolForVM(userVm); + String systemId = storagePoolDetailsDao.findDetail(storagePool.getId(), ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID).getValue(); if (systemId == null) { throw new CloudRuntimeException("Failed to get the system id for PowerFlex storage pool for deleting VM snapshot: " + vmSnapshot.getName()); } @@ -390,7 +391,7 @@ public class ScaleIOVMSnapshotStrategy extends ManagerBase implements VMSnapshot } String snapshotGroupId = vmSnapshotDetailsVO.getValue(); - final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + final ScaleIOGatewayClient client = getScaleIOClient(storagePool); int volumesDeleted = client.deleteSnapshotGroup(systemId, snapshotGroupId); if 
(volumesDeleted <= 0) { throw new CloudRuntimeException("Failed to delete VM snapshot: " + vmSnapshot.getName()); @@ -509,7 +510,7 @@ public class ScaleIOVMSnapshotStrategy extends ManagerBase implements VMSnapshot } } - private ScaleIOGatewayClient getScaleIOClient(final Long storagePoolId) throws Exception { - return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePoolId, storagePoolDetailsDao); + private ScaleIOGatewayClient getScaleIOClient(final StoragePool storagePool) throws Exception { + return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePool, storagePoolDetailsDao); } } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/LocalHostEndpoint.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/LocalHostEndpoint.java index e3f4bcbdeca..758bbe0c8c4 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/LocalHostEndpoint.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/LocalHostEndpoint.java @@ -73,6 +73,11 @@ public class LocalHostEndpoint implements EndPoint { return 0; } + @Override + public String getUuid() { + return ""; + } + @Override public String getHostAddr() { return "127.0.0.0"; diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/RemoteHostEndPoint.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/RemoteHostEndPoint.java index fdde4ce3e62..bd4bce29b0a 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/RemoteHostEndPoint.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/RemoteHostEndPoint.java @@ -55,6 +55,7 @@ public class RemoteHostEndPoint implements EndPoint { protected Logger logger = LogManager.getLogger(getClass()); private long hostId; + private String hostUuid; private String hostAddress; private String publicAddress; @@ -74,6 +75,7 @@ public class RemoteHostEndPoint implements EndPoint { private void configure(Host host) { hostId = host.getId(); + hostUuid 
= host.getUuid(); hostAddress = host.getPrivateIpAddress(); publicAddress = host.getPublicIpAddress(); if (Host.Type.SecondaryStorageVM == host.getType()) { @@ -106,6 +108,11 @@ public class RemoteHostEndPoint implements EndPoint { return hostId; } + @Override + public String getUuid() { + return hostUuid; + } + // used when HypervisorGuruManager choose a different host to send command private void setId(long id) { HostVO host = _hostDao.findById(id); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java index a621e8a076d..79be6588899 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java @@ -299,7 +299,7 @@ public class DefaultEndPointSelector implements EndPointSelector { @Override public EndPoint select(DataObject srcData, DataObject destData, StorageAction action, boolean encryptionRequired) { - logger.error("IR24 select BACKUPSNAPSHOT from primary to secondary " + srcData.getId() + " dest=" + destData.getId()); + logger.error("IR24 select BACKUPSNAPSHOT from primary to secondary {} dest={}", srcData, destData); if (action == StorageAction.BACKUPSNAPSHOT && srcData.getDataStore().getRole() == DataStoreRole.Primary) { SnapshotInfo srcSnapshot = (SnapshotInfo)srcData; VolumeInfo volumeInfo = srcSnapshot.getBaseVolume(); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java index 01842441e26..f2a3d99f93c 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java @@ -25,6 
+25,7 @@ import java.util.Map; import javax.inject.Inject; +import com.cloud.uservm.UserVm; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -150,23 +151,25 @@ public class VMSnapshotHelperImpl implements VMSnapshotHelper { } @Override - public Long getStoragePoolForVM(Long vmId) { - List rootVolumes = volumeDao.findReadyRootVolumesByInstance(vmId); + public StoragePoolVO getStoragePoolForVM(UserVm vm) { + List rootVolumes = volumeDao.findReadyRootVolumesByInstance(vm.getId()); if (rootVolumes == null || rootVolumes.isEmpty()) { - throw new InvalidParameterValueException("Failed to find root volume for the user vm:" + vmId); + throw new InvalidParameterValueException(String.format("Failed to find root volume for the user vm: %s", vm)); } VolumeVO rootVolume = rootVolumes.get(0); StoragePoolVO rootVolumePool = primaryDataStoreDao.findById(rootVolume.getPoolId()); if (rootVolumePool == null) { - throw new InvalidParameterValueException("Failed to find root volume storage pool for the user vm:" + vmId); + throw new InvalidParameterValueException(String.format( + "Failed to find storage pool for root volume %s for the user vm: %s", rootVolume, vm)); } if (rootVolumePool.isInMaintenance()) { - throw new InvalidParameterValueException("Storage pool for the user vm:" + vmId + " is in maintenance"); + throw new InvalidParameterValueException(String.format( + "Storage pool %s for root volume %s of the user vm: %s is in maintenance", rootVolumePool, rootVolume, vm)); } - return rootVolumePool.getId(); + return rootVolumePool; } @Override diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java index db3f798a68a..a2e9eff2a08 100644 --- 
a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java @@ -177,19 +177,19 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver { if (data.getType() == DataObjectType.TEMPLATE) { caller.setCallback(caller.getTarget().createTemplateAsyncCallback(null, null)); if (logger.isDebugEnabled()) { - logger.debug("Downloading template to data store " + dataStore.getId()); + logger.debug("Downloading template to data store {}", dataStore); } _downloadMonitor.downloadTemplateToStorage(data, caller); } else if (data.getType() == DataObjectType.VOLUME) { caller.setCallback(caller.getTarget().createVolumeAsyncCallback(null, null)); if (logger.isDebugEnabled()) { - logger.debug("Downloading volume to data store " + dataStore.getId()); + logger.debug("Downloading volume to data store {}", dataStore); } _downloadMonitor.downloadVolumeToStorage(data, caller); } else if (data.getType() == DataObjectType.SNAPSHOT) { caller.setCallback(caller.getTarget().createSnapshotAsyncCallback(null, null)); if (logger.isDebugEnabled()) { - logger.debug("Downloading volume to data store " + dataStore.getId()); + logger.debug("Downloading snapshot to data store {}", dataStore); } _downloadMonitor.downloadSnapshotToStorage(data, caller); } @@ -212,7 +212,7 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver { OVFInformationTO ovfInformationTO = answer.getOvfInformationTO(); boolean persistDeployAsIs = deployAsIsHelper.persistTemplateOVFInformationAndUpdateGuestOS(template.getId(), ovfInformationTO, tmpltStoreVO); if (!persistDeployAsIs) { - logger.info("Failed persisting deploy-as-is template details for template " + template.getName()); + logger.info("Failed persisting deploy-as-is template details for template {}", template); return null; } } @@ -221,7 +221,7 @@ public abstract class BaseImageStoreDriverImpl 
implements ImageStoreDriver { } return null; } - logger.info("Updating store ref entry for template " + template.getName()); + logger.info("Updating store ref entry for template {}", template); TemplateDataStoreVO updateBuilder = _templateStoreDao.createForUpdate(); updateBuilder.setDownloadPercent(answer.getDownloadPct()); updateBuilder.setDownloadState(answer.getDownloadStatus()); @@ -378,7 +378,7 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver { result.setResult(answer.getDetails()); } } catch (Exception ex) { - logger.debug("Unable to destroy " + data.getType().toString() + ": " + data.getId(), ex); + logger.debug("Unable to destroy {}: [id: {}, uuid: {}, name: {}]", data.getType().toString(), data.getId(), data.getUuid(), data.getName(), ex); result.setResult(ex.toString()); } callback.complete(result); @@ -443,14 +443,11 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver { answer = agentMgr.send(endPoint.getId(), cmd); answer.setContextParam("cmd", cmdExecId.toString()); return answer; - } catch (AgentUnavailableException e) { + } catch (AgentUnavailableException | OperationTimedoutException e) { errMsg = e.toString(); - logger.debug("Failed to send command, due to Agent:" + endPoint.getId() + ", " + e.toString()); - } catch (OperationTimedoutException e) { - errMsg = e.toString(); - logger.debug("Failed to send command, due to Agent:" + endPoint.getId() + ", " + e.toString()); + logger.debug("Failed to send command, due to Agent [id: {}, uuid: {}]: {}", endPoint.getId(), endPoint.getUuid(), e.toString()); } - throw new CloudRuntimeException("Failed to send command, due to Agent:" + endPoint.getId() + ", " + errMsg); + throw new CloudRuntimeException(String.format("Failed to send command, due to Agent: [id: %s, uuid: %s], %s", endPoint.getId(), endPoint.getUuid(), errMsg)); } @Override @@ -507,7 +504,7 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver { Answer answer = null; 
String errMsg = null; if (logger.isDebugEnabled()) { - logger.debug("Create Datadisk template: " + dataDiskTemplate.getId()); + logger.debug("Create Datadisk template: {}", dataDiskTemplate); } CreateDatadiskTemplateCommand cmd = new CreateDatadiskTemplateCommand(dataDiskTemplate.getTO(), path, diskId, fileSize, bootable); EndPoint ep = _defaultEpSelector.select(dataDiskTemplate.getDataStore()); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java index 35153a10996..6d6cb7b70a9 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java @@ -20,6 +20,8 @@ package org.apache.cloudstack.storage.vmsnapshot; import java.util.List; +import com.cloud.uservm.UserVm; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import com.cloud.agent.api.VMSnapshotTO; @@ -37,7 +39,7 @@ public interface VMSnapshotHelper { VMSnapshotTO getSnapshotWithParents(VMSnapshotVO snapshot); - Long getStoragePoolForVM(Long vmId); + StoragePoolVO getStoragePoolForVM(UserVm vm); Storage.StoragePoolType getStoragePoolType(Long poolId); } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java index e4c26932619..7f28224a316 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java @@ -26,6 +26,7 @@ import java.util.Map; import javax.inject.Inject; +import com.cloud.dc.dao.ClusterDao; import 
org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.logging.log4j.Logger; @@ -74,6 +75,8 @@ public class PrimaryDataStoreHelper { @Inject protected StoragePoolHostDao storagePoolHostDao; @Inject + protected ClusterDao clusterDao; + @Inject private AnnotationDao annotationDao; public DataStore createPrimaryDataStore(PrimaryDataStoreParameters params) { @@ -266,7 +269,7 @@ public class PrimaryDataStoreHelper { this._capacityDao.removeBy(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, null, null, null, poolVO.getId()); txn.commit(); - logger.debug("Storage pool id=" + poolVO.getId() + " is removed successfully"); + logger.debug("Storage pool {} is removed successfully", poolVO); return true; } @@ -286,7 +289,7 @@ public class PrimaryDataStoreHelper { _capacityDao.update(capacity.getId(), capacity); } }); - logger.debug("Scope of storage pool id=" + pool.getId() + " is changed to zone"); + logger.debug("Scope of storage pool {} is changed to zone", pool); } public void switchToCluster(DataStore store, ClusterScope clusterScope) { @@ -312,6 +315,6 @@ public class PrimaryDataStoreHelper { _capacityDao.update(capacity.getId(), capacity); } }); - logger.debug("Scope of storage pool id=" + pool.getId() + " is changed to cluster id=" + clusterScope.getScopeId()); + logger.debug("Scope of storage pool {} is changed to cluster {}", pool::toString, () -> clusterDao.findById(clusterScope.getScopeId())); } } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java index 7f373fa9988..6a10c26cc0b 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java @@ -46,7 +46,6 @@ import 
org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.volume.VolumeObject; -import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -469,6 +468,6 @@ public class PrimaryDataStoreImpl implements PrimaryDataStore { @Override public String toString() { - return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "name", "uuid"); + return pdsv.toString(); } } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImpl.java index 1ee4d40a567..de3be809a05 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImpl.java @@ -22,6 +22,7 @@ import java.util.List; import javax.inject.Inject; +import com.cloud.dc.dao.DataCenterDao; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; @@ -55,6 +56,8 @@ public class BasePrimaryDataStoreLifeCycleImpl { @Inject protected HostDao hostDao; @Inject + protected DataCenterDao zoneDao; + @Inject protected StoragePoolHostDao storagePoolHostDao; private List getPoolHostsList(ClusterScope clusterScope, HypervisorType hypervisorType) { @@ -76,7 +79,7 @@ public class BasePrimaryDataStoreLifeCycleImpl { if (hosts != null) { for (HostVO host : hosts) { try { - 
storageMgr.connectHostToSharedPool(host.getId(), store.getId()); + storageMgr.connectHostToSharedPool(host, store.getId()); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + store, e); } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java index c6d9fab5f17..1afc1a68b44 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java @@ -27,6 +27,7 @@ import com.cloud.agent.api.SetupPersistentNetworkCommand; import com.cloud.agent.api.to.NicTO; import com.cloud.alert.AlertManager; import com.cloud.configuration.ConfigurationManager; +import com.cloud.dc.dao.DataCenterDao; import com.cloud.exception.StorageConflictException; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; @@ -85,6 +86,8 @@ public class DefaultHostListener implements HypervisorHostListener { @Inject StorageService storageService; @Inject + DataCenterDao zoneDao; + @Inject NetworkOfferingDao networkOfferingDao; @Inject HostDao hostDao; @@ -103,7 +106,7 @@ public class DefaultHostListener implements HypervisorHostListener { private boolean createPersistentNetworkResourcesOnHost(long hostId) { HostVO host = hostDao.findById(hostId); if (host == null) { - logger.warn(String.format("Host with id %ld can't be found", hostId)); + logger.warn("Host with id {} can't be found", hostId); return false; } setupPersistentNetwork(host); @@ -134,32 +137,32 @@ public class DefaultHostListener implements HypervisorHostListener { ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool, nfsMountOpts.first()); cmd.setWait(modifyStoragePoolCommandWait); - logger.debug(String.format("Sending 
modify storage pool command to agent: %d for storage pool: %d with timeout %d seconds", - hostId, poolId, cmd.getWait())); + HostVO host = hostDao.findById(hostId); + logger.debug("Sending modify storage pool command to agent: {} for storage pool: {} with timeout {} seconds", host, pool, cmd.getWait()); final Answer answer = agentMgr.easySend(hostId, cmd); if (answer == null) { - throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command" + pool.getId()); + throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command %s", pool)); } if (!answer.getResult()) { - String msg = "Unable to attach storage pool" + poolId + " to the host" + hostId; + String msg = String.format("Unable to attach storage pool %s to the host %d", pool, hostId); alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, pool.getDataCenterId(), pool.getPodId(), msg, msg); - throw new CloudRuntimeException("Unable to establish connection from storage head to storage pool " + pool.getId() + " due to " + answer.getDetails() + - pool.getId()); + throw new CloudRuntimeException(String.format("Unable to establish connection from storage head to storage pool %s due to %s %s", + pool, answer.getDetails(), pool.getUuid())); } - assert (answer instanceof ModifyStoragePoolAnswer) : "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? Pool=" + - pool.getId() + "Host=" + hostId; + assert (answer instanceof ModifyStoragePoolAnswer) : String.format( + "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? 
Pool=%s Host=%d", pool, hostId); ModifyStoragePoolAnswer mspAnswer = (ModifyStoragePoolAnswer) answer; if (mspAnswer.getLocalDatastoreName() != null && pool.isShared()) { String datastoreName = mspAnswer.getLocalDatastoreName(); List localStoragePools = this.primaryStoreDao.listLocalStoragePoolByPath(pool.getDataCenterId(), datastoreName); for (StoragePoolVO localStoragePool : localStoragePools) { if (datastoreName.equals(localStoragePool.getPath())) { - logger.warn("Storage pool: " + pool.getId() + " has already been added as local storage: " + localStoragePool.getName()); - throw new StorageConflictException("Cannot add shared storage pool: " + pool.getId() + " because it has already been added as local storage:" - + localStoragePool.getName()); + logger.warn("Storage pool: {} has already been added as local storage: {}", pool, localStoragePool); + throw new StorageConflictException(String.format( + "Cannot add shared storage pool: %s because it has already been added as local storage: %s", pool, localStoragePool)); } } } @@ -173,7 +176,7 @@ public class DefaultHostListener implements HypervisorHostListener { storageService.updateStorageCapabilities(poolId, false); - logger.info("Connection established between storage pool " + pool + " and host " + hostId); + logger.info("Connection established between storage pool {} and host {}", pool, host); return createPersistentNetworkResourcesOnHost(hostId); } @@ -222,12 +225,11 @@ public class DefaultHostListener implements HypervisorHostListener { new CleanupPersistentNetworkResourceCommand(createNicTOFromNetworkAndOffering(persistentNetworkVO, networkOfferingVO, host)); Answer answer = agentMgr.easySend(hostId, cleanupCmd); if (answer == null) { - logger.error("Unable to get answer to the cleanup persistent network command " + persistentNetworkVO.getId()); + logger.error("Unable to get answer to the cleanup persistent network command {}", persistentNetworkVO); continue; } if (!answer.getResult()) { - String msg = 
String.format("Unable to cleanup persistent network resources from network %d on the host %d", persistentNetworkVO.getId(), hostId); - logger.error(msg); + logger.error("Unable to cleanup persistent network resources from network {} on the host {}", persistentNetworkVO, hostId); } } return true; @@ -258,11 +260,11 @@ public class DefaultHostListener implements HypervisorHostListener { new SetupPersistentNetworkCommand(createNicTOFromNetworkAndOffering(networkVO, networkOfferingVO, host)); Answer answer = agentMgr.easySend(host.getId(), persistentNetworkCommand); if (answer == null) { - throw new CloudRuntimeException("Unable to get answer to the setup persistent network command " + networkVO.getId()); + throw new CloudRuntimeException(String.format("Unable to get answer to the setup persistent network command %s", networkVO)); } if (!answer.getResult()) { - String msg = String.format("Unable to create persistent network resources for network %d on the host %d in zone %d", networkVO.getId(), host.getId(), networkVO.getDataCenterId()); - logger.error(msg); + logger.error("Unable to create persistent network resources for network {} on the host {} in zone {}", + networkVO::toString, host::toString, () -> zoneDao.findById(networkVO.getDataCenterId())); } } } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java index 825a8cbd941..4a9f34c9f56 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java @@ -900,7 +900,7 @@ public class VolumeObject implements VolumeInfo { volumeVO.setPassphraseId(null); volumeDao.persist(volumeVO); - logger.debug(String.format("Checking to see if we can delete passphrase id %s", passphraseId)); + logger.debug("Checking to see if we can delete passphrase id {} 
for volume {}", passphraseId, volumeVO); List volumes = volumeDao.listVolumesByPassphraseId(passphraseId); if (volumes != null && !volumes.isEmpty()) { @@ -944,4 +944,11 @@ public class VolumeObject implements VolumeInfo { public boolean isFollowRedirects() { return followRedirects; } + + @Override + public String toString() { + return String.format("VolumeObject %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "volumeVO", "dataStore")); + } } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 3ca1d9201db..bf67be91108 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -32,6 +32,7 @@ import java.util.concurrent.ExecutionException; import javax.inject.Inject; +import com.cloud.vm.dao.VMInstanceDao; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.command.user.volume.CheckAndRepairVolumeCmd; @@ -163,6 +164,8 @@ public class VolumeServiceImpl implements VolumeService { @Inject VolumeDao volDao; @Inject + VMInstanceDao vmDao; + @Inject PrimaryDataStoreProviderManager dataStoreMgr; @Inject DataMotionService motionSrv; @@ -191,7 +194,7 @@ public class VolumeServiceImpl implements VolumeService { @Inject HostDao _hostDao; @Inject - private PrimaryDataStoreDao storagePoolDao; + PrimaryDataStoreDao storagePoolDao; @Inject private StoragePoolDetailsDao _storagePoolDetailsDao; @Inject @@ -378,7 +381,7 @@ public class VolumeServiceImpl implements VolumeService { if (volume.getDataStore() == null) { logger.info("Expunge volume with no data store specified"); if (canVolumeBeRemoved(volume.getId())) { - logger.info("Volume " + 
volume.getId() + " is not referred anywhere, remove it from volumes table"); + logger.info("Volume {} is not referred anywhere, remove it from volumes table", volume); volDao.remove(volume.getId()); } future.complete(result); @@ -389,7 +392,7 @@ public class VolumeServiceImpl implements VolumeService { VolumeDataStoreVO volumeStore = _volumeStoreDao.findByVolume(volume.getId()); if (volumeStore != null) { if (volumeStore.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOAD_IN_PROGRESS) { - String msg = "Volume: " + volume.getName() + " is currently being uploaded; can't delete it."; + String msg = String.format("Volume: %s is currently being uploaded; can't delete it.", volume); logger.debug(msg); result.setSuccess(false); result.setResult(msg); @@ -400,7 +403,7 @@ public class VolumeServiceImpl implements VolumeService { VolumeVO vol = volDao.findById(volume.getId()); if (vol == null) { - logger.debug("Volume " + volume.getId() + " is not found"); + logger.debug("Volume {} is not found", volume); future.complete(result); return future; } @@ -484,7 +487,7 @@ public class VolumeServiceImpl implements VolumeService { } if (canVolumeBeRemoved(vo.getId())) { - logger.info("Volume " + vo.getId() + " is not referred anywhere, remove it from volumes table"); + logger.info("Volume {} is not referred anywhere, remove it from volumes table", vo); volDao.remove(vo.getId()); } @@ -633,10 +636,10 @@ public class VolumeServiceImpl implements VolumeService { VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(dataStore.getId(), template.getId(), deployAsIsConfiguration); if (templatePoolRef == null) { - throw new CloudRuntimeException("Failed to find template " + template.getUniqueName() + " in storage pool " + dataStore.getId()); + throw new CloudRuntimeException(String.format("Failed to find template %s in storage pool %s", template.getImage(), dataStore)); } else { if (logger.isDebugEnabled()) { - logger.debug("Found template " + 
template.getUniqueName() + " in storage pool " + dataStore.getId() + " with VMTemplateStoragePool id: " + templatePoolRef.getId()); + logger.debug("Found template {} in storage pool {} with VMTemplateStoragePool: {}", template.getImage(), dataStore, templatePoolRef); } } long templatePoolRefId = templatePoolRef.getId(); @@ -656,8 +659,8 @@ public class VolumeServiceImpl implements VolumeService { } templatePoolRef = _tmpltPoolDao.findByPoolTemplate(dataStore.getId(), template.getId(), deployAsIsConfiguration); if (templatePoolRef != null && templatePoolRef.getState() == ObjectInDataStoreStateMachine.State.Ready) { - logger.info( - "Unable to acquire lock on VMTemplateStoragePool " + templatePoolRefId + ", But Template " + template.getUniqueName() + " is already copied to primary storage, skip copying"); + logger.info("Unable to acquire lock on VMTemplateStoragePool {}, But " + + "Template {} is already copied to primary storage, skip copying", templatePoolRefId, template); createVolumeFromBaseImageAsync(volume, templateOnPrimaryStoreObj, dataStore, future); return; } @@ -669,7 +672,7 @@ public class VolumeServiceImpl implements VolumeService { } try { if (templatePoolRef.getState() == ObjectInDataStoreStateMachine.State.Ready) { - logger.info("Template " + template.getUniqueName() + " is already copied to primary storage, skip copying"); + logger.info("Template {} is already copied to primary storage, skip copying", template.getImage()); createVolumeFromBaseImageAsync(volume, templateOnPrimaryStoreObj, dataStore, future); return; } @@ -891,7 +894,7 @@ public class VolumeServiceImpl implements VolumeService { try { destroyAndReallocateManagedVolume((VolumeInfo) vo); } catch (CloudRuntimeException ex) { - logger.warn("Couldn't destroy managed volume: " + vo.getId()); + logger.warn("Couldn't destroy managed volume: {}", vo); } } @@ -912,7 +915,7 @@ public class VolumeServiceImpl implements VolumeService { VMTemplateStoragePoolVO templatePoolRef = 
_tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId(), srcTemplateInfo.getDeployAsIsConfiguration()); if (templatePoolRef == null) { - throw new CloudRuntimeException("Failed to find template " + srcTemplateInfo.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId()); + throw new CloudRuntimeException("Failed to find template " + srcTemplateInfo.getUniqueName() + " in storage pool " + destPrimaryDataStore); } else if (templatePoolRef.getState() == ObjectInDataStoreStateMachine.State.Ready) { // Template already exists return templateOnPrimary; @@ -945,7 +948,7 @@ public class VolumeServiceImpl implements VolumeService { errMesg = callback.result.getResult(); } templateOnPrimary.processEvent(Event.OperationFailed); - throw new CloudRuntimeException("Unable to create template " + templateOnPrimary.getId() + " on primary storage " + destPrimaryDataStore.getId() + ":" + errMesg); + throw new CloudRuntimeException(String.format("Unable to create template %s on primary storage %s: %s", templateOnPrimary.getImage(), destPrimaryDataStore, errMesg)); } templateOnPrimary.processEvent(Event.OperationSuccessed); @@ -1036,7 +1039,7 @@ public class VolumeServiceImpl implements VolumeService { try { grantAccess(templateOnPrimary, destHost, destPrimaryDataStore); } catch (Exception e) { - throw new StorageAccessException("Unable to grant access to template: " + templateOnPrimary.getId() + " on host: " + destHost.getId()); + throw new StorageAccessException(String.format("Unable to grant access to template: %s on host: %s", templateOnPrimary.getImage(), destHost)); } templateOnPrimary.processEvent(Event.CopyingRequested); @@ -1057,12 +1060,12 @@ public class VolumeServiceImpl implements VolumeService { targets.add(details); - removeDynamicTargets(destHost.getId(), targets); + removeDynamicTargets(destHost, targets); } } if (result.isFailed()) { - throw new CloudRuntimeException("Failed to copy template " + templateOnPrimary.getId() + 
" to primary storage " + destPrimaryDataStore.getId() + ": " + result.getResult()); + throw new CloudRuntimeException(String.format("Failed to copy template %s to primary storage %s: %s", templateOnPrimary, destPrimaryDataStore, result.getResult())); // XXX: I find it is useful to destroy the volume on primary storage instead of another thread trying the copy again because I've seen // something weird happens to the volume (XenServer creates an SR, but the VDI copy can fail). // For now, I just retry the copy. @@ -1080,7 +1083,7 @@ public class VolumeServiceImpl implements VolumeService { } } - private void removeDynamicTargets(long hostId, List> targets) { + private void removeDynamicTargets(Host host, List> targets) { ModifyTargetsCommand cmd = new ModifyTargetsCommand(); cmd.setTargets(targets); @@ -1088,20 +1091,16 @@ public class VolumeServiceImpl implements VolumeService { cmd.setAdd(false); cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC); - sendModifyTargetsCommand(cmd, hostId); + sendModifyTargetsCommand(cmd, host); } - private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { - Answer answer = agentMgr.easySend(hostId, cmd); + private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, Host host) { + Answer answer = agentMgr.easySend(host.getId(), cmd); if (answer == null) { - String msg = "Unable to get an answer to the modify targets command"; - - logger.warn(msg); + logger.warn("Unable to get an answer to the modify targets command"); } else if (!answer.getResult()) { - String msg = "Unable to modify target on the following host: " + hostId; - - logger.warn(msg); + logger.warn("Unable to modify target on the following host: {}", host); } } @@ -1117,12 +1116,12 @@ public class VolumeServiceImpl implements VolumeService { VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId(), volumeInfo.getDeployAsIsConfiguration()); if 
(templatePoolRef == null) { - throw new CloudRuntimeException("Failed to find template " + templateOnPrimary.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId()); + throw new CloudRuntimeException(String.format("Failed to find template %s in storage pool %s", templateOnPrimary.getImage(), destPrimaryDataStore)); } //XXX: not sure if this the right thing to do here. We can always fallback to the "copy from sec storage" if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { - throw new CloudRuntimeException("Template " + templateOnPrimary.getUniqueName() + " has not been downloaded to primary storage."); + throw new CloudRuntimeException(String.format("Template %s has not been downloaded to primary storage.", templateOnPrimary.getImage())); } try { @@ -1149,7 +1148,7 @@ public class VolumeServiceImpl implements VolumeService { VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), srcTemplateOnPrimary.getId(), null); if (templatePoolRef == null) { - throw new CloudRuntimeException("Failed to find template " + srcTemplateOnPrimary.getUniqueName() + " in storage pool " + srcTemplateOnPrimary.getId()); + throw new CloudRuntimeException(String.format("Failed to find template %s in storage pool %s", srcTemplateOnPrimary.getImage(), srcTemplateOnPrimary)); } if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { @@ -1162,7 +1161,7 @@ public class VolumeServiceImpl implements VolumeService { try { grantAccess(srcTemplateOnPrimary, destHost, destPrimaryDataStore); } catch (Exception e) { - throw new StorageAccessException("Unable to grant access to src template: " + srcTemplateOnPrimary.getId() + " on host: " + destHost.getId()); + throw new StorageAccessException(String.format("Unable to grant access to src template: %s on host: %s", srcTemplateOnPrimary, destHost)); } _volumeDetailsDao.addDetail(volumeInfo.getId(), volumeDetailKey, String.valueOf(templatePoolRef.getId()), false); @@ 
-1211,7 +1210,7 @@ public class VolumeServiceImpl implements VolumeService { try { destroyAndReallocateManagedVolume(volumeInfo); } catch (CloudRuntimeException ex) { - logger.warn("Failed to destroy managed volume: " + volumeInfo.getId()); + logger.warn("Failed to destroy managed volume: {}", volumeInfo); errMsg += " : " + ex.getMessage(); } @@ -1248,21 +1247,21 @@ public class VolumeServiceImpl implements VolumeService { VolumeVO newVolume = (VolumeVO) newVol; newVolume.set_iScsiName(null); volDao.update(newVolume.getId(), newVolume); - logger.debug("Allocated new volume: " + newVolume.getId() + " for the VM: " + volume.getInstanceId()); + logger.debug("Allocated new volume: {} for the VM: {}", newVolume::toString, () -> (volume.getInstanceId() != null ? vmDao.findById(volume.getInstanceId()) : null)); try { AsyncCallFuture expungeVolumeFuture = expungeVolumeAsync(volumeInfo); VolumeApiResult expungeVolumeResult = expungeVolumeFuture.get(); if (expungeVolumeResult.isFailed()) { - logger.warn("Failed to expunge volume: " + volumeInfo.getId() + " that was created"); - throw new CloudRuntimeException("Failed to expunge volume: " + volumeInfo.getId() + " that was created"); + logger.warn("Failed to expunge volume: {} that was created", volumeInfo); + throw new CloudRuntimeException(String.format("Failed to expunge volume: %s that was created", volumeInfo.getVolume())); } } catch (Exception ex) { if (canVolumeBeRemoved(volumeInfo.getId())) { volDao.remove(volumeInfo.getId()); } - logger.warn("Unable to expunge volume: " + volumeInfo.getId() + " due to: " + ex.getMessage()); - throw new CloudRuntimeException("Unable to expunge volume: " + volumeInfo.getId() + " due to: " + ex.getMessage()); + logger.warn("Unable to expunge volume: {} due to: {}", volumeInfo, ex.getMessage()); + throw new CloudRuntimeException(String.format("Unable to expunge volume: %s due to: %s", volumeInfo.getVolume(), ex.getMessage())); } } @@ -1382,12 +1381,14 @@ public class VolumeServiceImpl 
implements VolumeService { templateOnPrimary = createManagedTemplateVolume(srcTemplateInfo, destPrimaryDataStore); if (templateOnPrimary == null) { - throw new CloudRuntimeException("Failed to create template " + srcTemplateInfo.getUniqueName() + " on primary storage: " + destDataStoreId); + throw new CloudRuntimeException(String.format("Failed to create template %s on primary storage: %s", + srcTemplateInfo.getImage(), destPrimaryDataStore)); } templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId(), null); if (templatePoolRef == null) { - throw new CloudRuntimeException("Failed to find template " + srcTemplateInfo.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId()); + throw new CloudRuntimeException(String.format("Failed to find template %s in storage pool %s", + srcTemplateInfo.getImage(), destPrimaryDataStore)); } if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { @@ -1407,7 +1408,7 @@ public class VolumeServiceImpl implements VolumeService { try { grantAccess(templateOnPrimary, destHost, destPrimaryDataStore); } catch (Exception e) { - throw new StorageAccessException("Unable to grant access to template: " + templateOnPrimary.getId() + " on host: " + destHost.getId()); + throw new StorageAccessException(String.format("Unable to grant access to template: %s on host: %s", templateOnPrimary, destHost)); } templateOnPrimary.processEvent(Event.CopyingRequested); @@ -1416,8 +1417,8 @@ public class VolumeServiceImpl implements VolumeService { //Download and copy template to the managed volume TemplateInfo templateOnPrimaryNow = tmplFactory.getReadyBypassedTemplateOnManagedStorage(srcTemplateId, templateOnPrimary, destDataStoreId, destHostId); if (templateOnPrimaryNow == null) { - logger.debug("Failed to prepare ready bypassed template: " + srcTemplateId + " on primary storage: " + templateOnPrimary.getId()); - throw new CloudRuntimeException("Failed to prepare ready bypassed 
template: " + srcTemplateId + " on primary storage: " + templateOnPrimary.getId()); + logger.debug("Failed to prepare ready bypassed template: {} on primary storage: {}", srcTemplateInfo, templateOnPrimary); + throw new CloudRuntimeException(String.format("Failed to prepare ready bypassed template: %s on primary storage: %s", srcTemplateInfo, templateOnPrimary)); } templateOnPrimary.processEvent(Event.OperationSuccessed); return templateOnPrimaryNow; @@ -1459,7 +1460,7 @@ public class VolumeServiceImpl implements VolumeService { AsyncCallFuture future = new AsyncCallFuture<>(); if (storageCanCloneVolume && computeSupportsVolumeClone) { - logger.debug("Storage " + destDataStoreId + " can support cloning using a cached template and compute side is OK with volume cloning."); + logger.debug("Storage {} can support cloning using a cached template and compute side is OK with volume cloning.", destPrimaryDataStore); GlobalLock lock = null; TemplateInfo templateOnPrimary = null; @@ -1483,7 +1484,7 @@ public class VolumeServiceImpl implements VolumeService { templateOnPrimary = createManagedTemplateVolume(srcTemplateInfo, destPrimaryDataStore); if (templateOnPrimary == null) { - throw new CloudRuntimeException("Failed to create template " + srcTemplateInfo.getUniqueName() + " on primary storage: " + destDataStoreId); + throw new CloudRuntimeException(String.format("Failed to create template %s on primary storage: %s", srcTemplateInfo, destPrimaryDataStore)); } } @@ -1491,7 +1492,7 @@ public class VolumeServiceImpl implements VolumeService { VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId(), null); if (templatePoolRef == null) { - throw new CloudRuntimeException("Failed to find template " + srcTemplateInfo.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId()); + throw new CloudRuntimeException(String.format("Failed to find template %s in storage pool %s", srcTemplateInfo, 
destPrimaryDataStore)); } if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { @@ -1516,7 +1517,7 @@ public class VolumeServiceImpl implements VolumeService { if (destPrimaryDataStore.getPoolType() != StoragePoolType.PowerFlex) { // We have a template on primary storage. Clone it to new volume. - logger.debug("Creating a clone from template on primary storage " + destDataStoreId); + logger.debug("Creating a clone from template on primary storage {}", destPrimaryDataStore); createManagedVolumeCloneTemplateAsync(volumeInfo, templateOnPrimary, destPrimaryDataStore, future); } else { @@ -1848,13 +1849,8 @@ public class VolumeServiceImpl implements VolumeService { if (logger.isDebugEnabled()) { String srcRole = (srcStore != null && srcStore.getRole() != null ? srcVolume.getDataStore().getRole().toString() : ""); - String msg = String.format("copying %s(id=%d, role=%s) to %s (id=%d, role=%s)" - , srcVolume.getName() - , srcVolume.getId() - , srcRole - , destStore.getName() - , destStore.getId() - , destStore.getRole()); + String msg = String.format("copying %s (role=%s) to %s (role=%s)", + srcVolume, srcRole, destStore, destStore.getRole()); logger.debug(msg); } @@ -1917,7 +1913,7 @@ public class VolumeServiceImpl implements VolumeService { srcVolume.processEvent(Event.OperationFailed); destroyVolume(destVolume.getId()); if (destVolume.getStoragePoolType() == StoragePoolType.PowerFlex) { - logger.info("Dest volume " + destVolume.getId() + " can be removed"); + logger.info("Dest volume {} can be removed", destVolume); destVolume.processEvent(Event.ExpungeRequested); destVolume.processEvent(Event.OperationSuccessed); volDao.remove(destVolume.getId()); @@ -1961,12 +1957,12 @@ public class VolumeServiceImpl implements VolumeService { volDao.updateUuid(sourceVolumeId, destinationVolume.getId()); volDao.detachVolume(sourceVolumeId); - logger.info(String.format("Cleaning up %s on storage [%s].", sourceVolumeVo.getVolumeDescription(), sourceVolumeVo.getPoolId())); 
+ logger.info("Cleaning up {} on storage [{}].", sourceVolumeVo, sourceVolume.getDataStore()); destroyVolume(sourceVolumeId); try { if (sourceVolume.getStoragePoolType() == StoragePoolType.PowerFlex) { - logger.info(String.format("Source volume %s can be removed.", sourceVolumeVo.getVolumeDescription())); + logger.info("Source volume {} can be removed.", sourceVolumeVo); sourceVolume.processEvent(Event.ExpungeRequested); sourceVolume.processEvent(Event.OperationSuccessed); volDao.remove(sourceVolume.getId()); @@ -1975,7 +1971,7 @@ public class VolumeServiceImpl implements VolumeService { expungeSourceVolumeAfterMigration(sourceVolumeVo, retryExpungeVolumeAsync); return true; } catch (InterruptedException | ExecutionException e) { - logger.error(String.format("Failed to clean up %s on storage [%s].", sourceVolumeVo.getVolumeDescription(), sourceVolumeVo.getPoolId()), e); + logger.error("Failed to clean up {} on storage [{}].", sourceVolumeVo, sourceVolume.getDataStore(), e); return false; } } @@ -1987,13 +1983,14 @@ public class VolumeServiceImpl implements VolumeService { AsyncCallFuture destroyFuture = expungeVolumeAsync(sourceVolume); VolumeApiResult volumeApiResult = destroyFuture.get(); + StoragePoolVO pool = storagePoolDao.findById(sourceVolumeVo.getPoolId()); if (volumeApiResult.isSuccess()) { - logger.debug(String.format("%s on storage [%s] was cleaned up successfully.", sourceVolumeVo.getVolumeDescription(), sourceVolumeVo.getPoolId())); + logger.debug("{} on storage [{}] was cleaned up successfully.", sourceVolumeVo, pool); return; } - String message = String.format("Failed to clean up %s on storage [%s] due to [%s].", sourceVolumeVo.getVolumeDescription(), sourceVolumeVo.getPoolId(), - volumeApiResult.getResult()); + String message = String.format("Failed to clean up %s on storage [%s] due to [%s].", + sourceVolumeVo, pool, volumeApiResult.getResult()); if (!retryExpungeVolumeAsync) { logger.warn(message); @@ -2059,7 +2056,7 @@ public class 
VolumeServiceImpl implements VolumeService { AsyncCallFuture createVolumeFuture = createVolumeAsync(destVolume, destStore); VolumeApiResult createVolumeResult = createVolumeFuture.get(); if (createVolumeResult.isFailed()) { - logger.debug("Failed to create dest volume " + destVolume.getId() + ", volume can be removed"); + logger.debug("Failed to create dest volume {}, volume can be removed", destVolume); destroyVolume(destVolume.getId()); destVolume.processEvent(Event.ExpungeRequested); destVolume.processEvent(Event.OperationSuccessed); @@ -2204,14 +2201,12 @@ public class VolumeServiceImpl implements VolumeService { } if (StringUtils.isAnyEmpty(srcPoolSystemId, destPoolSystemId)) { - logger.warn("PowerFlex src pool: " + srcDataStore.getId() + " or dest pool: " + destDataStore.getId() + - " storage instance details are not available"); + logger.warn("PowerFlex src pool: {} or dest pool: {} storage instance details are not available", srcDataStore, destDataStore); return false; } if (!srcPoolSystemId.equals(destPoolSystemId)) { - logger.debug("PowerFlex src pool: " + srcDataStore.getId() + " and dest pool: " + destDataStore.getId() + - " belongs to different storage instances, create new managed volume"); + logger.debug("PowerFlex src pool: {} and dest pool: {} belong to different storage instances, create new managed volume", srcDataStore, destDataStore); return true; } } @@ -2407,7 +2402,7 @@ public class VolumeServiceImpl implements VolumeService { EndPoint ep = _epSelector.select(store); if (ep == null) { - String errorMessage = "There is no secondary storage VM for image store " + store.getName(); + String errorMessage = String.format("There is no secondary storage VM for image store %s", store); logger.warn(errorMessage); throw new CloudRuntimeException(errorMessage); } @@ -2439,7 +2434,7 @@ public class VolumeServiceImpl implements VolumeService { if (volStore != null) { physicalSize = volStore.getPhysicalSize(); } else { - logger.warn("No entry found in
volume_store_ref for volume id: " + vo.getId() + " and image store id: " + ds.getId() + " at the end of uploading volume!"); + logger.warn("No entry found in volume_store_ref for volume: {} and image store: {} at the end of uploading volume!", vo, ds); } Scope dsScope = ds.getScope(); if (dsScope.getScopeType() == ScopeType.ZONE) { @@ -2447,7 +2442,7 @@ public class VolumeServiceImpl implements VolumeService { UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_UPLOAD, vo.getAccountId(), dsScope.getScopeId(), vo.getId(), vo.getName(), null, null, physicalSize, vo.getSize(), Volume.class.getName(), vo.getUuid()); } else { - logger.warn("Zone scope image store " + ds.getId() + " has a null scope id"); + logger.warn("Zone scope image store {} has a null scope id", ds); } } else if (dsScope.getScopeType() == ScopeType.REGION) { // publish usage event for region-wide image store using a -1 zoneId for 4.2, need to revisit post-4.2 @@ -2590,8 +2585,11 @@ public class VolumeServiceImpl implements VolumeService { for (VolumeDataStoreVO volumeStore : dbVolumes) { VolumeVO volume = volDao.findById(volumeStore.getVolumeId()); if (volume == null) { - logger.warn("Volume_store_ref table shows that volume " + volumeStore.getVolumeId() + " is on image store " + storeId - + ", but the volume is not found in volumes table, potentially some bugs in deleteVolume, so we just treat this volume to be deleted and mark it as destroyed"); + logger.warn("Volume_store_ref table shows that volume {} is " + + "on image store {}, but the volume is not found in volumes " + + "table, potentially some bugs in deleteVolume, so we just " + + "treat this volume to be deleted and mark it as destroyed", + volumeStore.getVolumeId(), store); volumeStore.setDestroyed(true); _volumeStoreDao.update(volumeStore.getId(), volumeStore); continue; @@ -2600,27 +2598,26 @@ public class VolumeServiceImpl implements VolumeService { if (volumeInfos.containsKey(volume.getId())) { TemplateProp volInfo = 
volumeInfos.remove(volume.getId()); toBeDownloaded.remove(volumeStore); - logger.info("Volume Sync found " + volume.getUuid() + " already in the volume image store table"); + logger.info("Volume Sync found {} already in the volume image store table", volume); if (volumeStore.getDownloadState() != Status.DOWNLOADED) { volumeStore.setErrorString(""); } if (volInfo.isCorrupted()) { volumeStore.setDownloadState(Status.DOWNLOAD_ERROR); - String msg = "Volume " + volume.getUuid() + " is corrupted on image store"; + String msg = String.format("Volume %s is corrupted on image store", volume); volumeStore.setErrorString(msg); logger.info(msg); if (volume.getState() == State.NotUploaded || volume.getState() == State.UploadInProgress) { - logger.info("Volume Sync found " + volume.getUuid() + " uploaded using SSVM on image store " + storeId + " as corrupted, marking it as failed"); + logger.info("Volume Sync found {} uploaded using SSVM on image store {} as corrupted, marking it as failed", volume, store); _volumeStoreDao.update(volumeStore.getId(), volumeStore); // mark volume as failed, so that storage GC will clean it up VolumeObject volObj = (VolumeObject)volFactory.getVolume(volume.getId()); volObj.processEvent(Event.OperationFailed); } else if (volumeStore.getDownloadUrl() == null) { - msg = "Volume (" + volume.getUuid() + ") with install path " + volInfo.getInstallPath() + " is corrupted, please check in image store: " - + volumeStore.getDataStoreId(); + msg = String.format("Volume (%s) with install path %s is corrupted, please check in image store: %s", volume, volInfo.getInstallPath(), store); logger.warn(msg); } else { - logger.info("Removing volume_store_ref entry for corrupted volume " + volume.getName()); + logger.info("Removing volume_store_ref entry for corrupted volume {}", volume); _volumeStoreDao.remove(volumeStore.getId()); toBeDownloaded.add(volumeStore); } @@ -2660,10 +2657,10 @@ public class VolumeServiceImpl implements VolumeService { } continue; } else 
if (volume.getState() == State.NotUploaded || volume.getState() == State.UploadInProgress) { // failed uploads through SSVM - logger.info("Volume Sync did not find " + volume.getUuid() + " uploaded using SSVM on image store " + storeId + ", marking it as failed"); + logger.info("Volume Sync did not find {} uploaded using SSVM on image store {}, marking it as failed", volume, store); toBeDownloaded.remove(volumeStore); volumeStore.setDownloadState(Status.DOWNLOAD_ERROR); - String msg = "Volume " + volume.getUuid() + " is corrupted on image store"; + String msg = String.format("Volume %s is corrupted on image store", volume); volumeStore.setErrorString(msg); _volumeStoreDao.update(volumeStore.getId(), volumeStore); // mark volume as failed, so that storage GC will clean it up @@ -2673,7 +2670,7 @@ public class VolumeServiceImpl implements VolumeService { } // Volume is not on secondary but we should download. if (volumeStore.getDownloadState() != Status.DOWNLOADED) { - logger.info("Volume Sync did not find " + volume.getName() + " ready on image store " + storeId + ", will request download to start/resume shortly"); + logger.info("Volume Sync did not find {} ready on image store {}, will request download to start/resume shortly", volume, store); } } @@ -2694,9 +2691,10 @@ public class VolumeServiceImpl implements VolumeService { } } - logger.debug("Volume " + volumeHost.getVolumeId() + " needs to be downloaded to " + store.getName()); // reset volume status back to Allocated VolumeObject vol = (VolumeObject)volFactory.getVolume(volumeHost.getVolumeId()); + logger.debug("Volume {} needs to be downloaded to {}", vol, store); + vol.processEvent(Event.OperationFailed); // reset back volume status // remove leftover volume_store_ref entry since re-download will create it again _volumeStoreDao.remove(volumeHost.getId()); @@ -2729,10 +2727,10 @@ public class VolumeServiceImpl implements VolumeService { answer = ep.sendMessage(dtCommand); } if (answer == null || 
!answer.getResult()) { - logger.info("Failed to deleted volume at store: " + store.getName()); + logger.info("Failed to delete volume at store: {}", store); } else { - String description = "Deleted volume " + tInfo.getTemplateName() + " on secondary storage " + storeId; + String description = String.format("Deleted volume %s on secondary storage %s", tInfo.getTemplateName(), store); logger.info(description); } } @@ -2740,7 +2738,7 @@ public class VolumeServiceImpl implements VolumeService { syncLock.unlock(); } } else { - logger.info("Couldn't get global lock on " + lockString + ", another thread may be doing volume sync on data store " + storeId + " now."); + logger.info("Couldn't get global lock on {}, another thread may be doing volume sync on data store {} now.", lockString, store); } } finally { syncLock.releaseRef(); } @@ -2763,7 +2761,7 @@ public class VolumeServiceImpl implements VolumeService { return tanswer.getTemplateInfo(); } else { if (logger.isDebugEnabled()) { - logger.debug("Can not list volumes for image store " + store.getId()); + logger.debug("Can not list volumes for image store {}", store); } } @@ -2776,11 +2774,11 @@ public class VolumeServiceImpl implements VolumeService { try { snapshot = snapshotMgr.takeSnapshot(volume); } catch (CloudRuntimeException cre) { - logger.error("Take snapshot: " + volume.getId() + " failed", cre); + logger.error("Take snapshot: {} failed", volume, cre); throw cre; } catch (Exception e) { if (logger.isDebugEnabled()) { - logger.debug("unknown exception while taking snapshot for volume " + volume.getId() + " was caught", e); + logger.debug("unknown exception while taking snapshot for volume {} was caught", volume, e); } throw new CloudRuntimeException("Failed to take snapshot", e); } @@ -2793,7 +2791,7 @@ public class VolumeServiceImpl implements VolumeService { if (HypervisorType.KVM.equals(host.getHypervisorType()) && DataObjectType.VOLUME.equals(dataObject.getType())) { VolumeInfo volumeInfo = 
volFactory.getVolume(dataObject.getId()); if (VolumeApiServiceImpl.AllowCheckAndRepairVolume.valueIn(volumeInfo.getPoolId())) { - logger.info(String.format("Trying to check and repair the volume %d", dataObject.getId())); + logger.info("Trying to check and repair the volume {}", dataObject); String repair = CheckAndRepairVolumeCmd.RepairValues.LEAKS.name().toLowerCase(); CheckAndRepairVolumePayload payload = new CheckAndRepairVolumePayload(repair); volumeInfo.addPayload(payload); @@ -2904,9 +2902,8 @@ public class VolumeServiceImpl implements VolumeService { logger.debug(String.format("Volume [%s] is not present in the secondary storage. Therefore we do not need to move it in the secondary storage.", volume)); return; } - logger.debug(String.format("Volume [%s] is present in secondary storage. It will be necessary to move it from the source account's [%s] folder to the destination " - + "account's [%s] folder.", - volume.getUuid(), sourceAccount, destAccount)); + logger.debug("Volume [{}] is present in secondary storage. 
It will be necessary to move it from the source account's [{}] folder to the destination " + + "account's [{}] folder.", volume, sourceAccount, destAccount); VolumeInfo volumeInfo = volFactory.getVolume(volume.getId(), DataStoreRole.Image); String datastoreUri = volumeInfo.getDataStore().getUri(); @@ -2922,17 +2919,17 @@ public class VolumeServiceImpl implements VolumeService { if (!answer.getResult()) { String msg = String.format("Unable to move volume [%s] from [%s] (source account's [%s] folder) to [%s] (destination account's [%s] folder) in the secondary storage, due " + "to [%s].", - volume.getUuid(), srcPath.getParent(), sourceAccount, destPath, destAccount, answer.getDetails()); + volume, srcPath.getParent(), sourceAccount, destPath, destAccount, answer.getDetails()); logger.error(msg); throw new CloudRuntimeException(msg); } - logger.debug(String.format("Volume [%s] was moved from [%s] (source account's [%s] folder) to [%s] (destination account's [%s] folder) in the secondary storage.", - volume.getUuid(), srcPath.getParent(), sourceAccount, destPath, destAccount)); + logger.debug("Volume [{}] was moved from [{}] (source account's [{}] folder) to [{}] (destination account's [{}] folder) in the secondary storage.", + volume, srcPath.getParent(), sourceAccount, destPath, destAccount); volumeStore.setInstallPath(String.format("%s/%s", destPath, srcPath.getFileName().toString())); if (!_volumeStoreDao.update(volumeStore.getId(), volumeStore)) { - String msg = String.format("Unable to update volume [%s] install path in the DB.", volumeStore.getVolumeId()); + String msg = String.format("Unable to update volume [%s] install path in the DB.", volume); logger.error(msg); throw new CloudRuntimeException(msg); } diff --git a/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImplTest.java 
b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImplTest.java index 355eb075129..538ba1a1761 100644 --- a/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImplTest.java +++ b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImplTest.java @@ -100,7 +100,7 @@ public class BasePrimaryDataStoreLifeCycleImplTest { ReflectionTestUtils.setField(host, "id", HOST_ID); List hypervisorTypes = Arrays.asList(HypervisorType.KVM, HypervisorType.VMware); Mockito.when(resourceManager.listAllHostsInOneZoneNotInClusterByHypervisors(hypervisorTypes, ZONE_ID, CLUSTER_ID)).thenReturn(Arrays.asList(host)); - Mockito.when(storageManager.connectHostToSharedPool(HOST_ID, POOL_ID)).thenReturn(true); + Mockito.when(storageManager.connectHostToSharedPool(host, POOL_ID)).thenReturn(true); dataStoreLifeCycle.changeStoragePoolScopeToZone(store, clusterScope, null); diff --git a/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java index c4241dfbc3a..aa5ac3b9a76 100644 --- a/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java +++ b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java @@ -47,6 +47,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -85,6 +86,9 @@ public class VolumeServiceTest extends 
TestCase{ @Mock StorageManager storageManagerMock; + @Mock + PrimaryDataStoreDao primaryDataStoreDao; + @Mock VolumeVO volumeVoMock; @@ -105,6 +109,7 @@ public class VolumeServiceTest extends TestCase{ volumeServiceImplSpy.snapshotMgr = snapshotManagerMock; volumeServiceImplSpy._storageMgr = storageManagerMock; volumeServiceImplSpy._hostDao = hostDaoMock; + volumeServiceImplSpy.storagePoolDao = primaryDataStoreDao; volumeServiceImplSpy.diskOfferingDao = diskOfferingDaoMock; } @@ -220,6 +225,7 @@ public class VolumeServiceTest extends TestCase{ VolumeVO vo = new VolumeVO() {}; vo.setPoolType(Storage.StoragePoolType.Filesystem); volumeObject.configure(null, vo); + vo.setPoolId(1L); List exceptions = new ArrayList<>(Arrays.asList(new InterruptedException(), new ExecutionException() {})); diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java b/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java index 0ec566a4194..e26e32e7b2e 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java @@ -110,6 +110,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C // _msid is the unique persistent identifier that peer name is based upon // private Long _mshostId = null; + private ManagementServerHostVO _mshost = null; protected long _msId = ManagementServerNode.getManagementServerId(); protected long _runId = System.currentTimeMillis(); @@ -380,11 +381,11 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C } try { if (logger.isDebugEnabled()) { - logger.debug("Forwarding " + cmds + " to " + peer.getMsid()); + logger.debug("Forwarding {} to {}", cmds, peer); } executeAsync(peerName, agentId, cmds, true); } catch (final Exception e) { - logger.warn("Caught exception while talking to " + peer.getMsid()); + logger.warn("Caught exception while talking to {}", 
peer); } } } @@ -408,11 +409,11 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C final String peerName = Long.toString(peer.getMsid()); try { if (logger.isDebugEnabled()) { - logger.debug("Forwarding " + status + " to " + peer.getMsid()); + logger.debug("Forwarding {} to {}", status, peer); } sendStatus(peerName, status); } catch (final Exception e) { - String msg = String.format("Caught exception while talking to %d", peer.getMsid()); + String msg = String.format("Caught exception while talking to %s", peer); logger.warn(msg); logger.debug(msg, e); } @@ -503,7 +504,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C logger.debug("Notify management server node join to listeners."); for (final ManagementServerHostVO mshost : nodeList) { - logger.debug("Joining node, IP: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid()); + logger.debug("Joining node, IP: {}, ms: {}", mshost.getServiceIP(), mshost); } } @@ -523,7 +524,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C for (final ManagementServerHostVO mshost : nodeList) { if (logger.isDebugEnabled()) { - logger.debug("Leaving node, IP: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid()); + logger.debug("Leaving node, IP: {}, ms: {}", mshost.getServiceIP(), mshost); } cancelClusterRequestToPeer(String.valueOf(mshost.getMsid())); } @@ -595,7 +596,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C profilerHeartbeatUpdate.start(); txn.transitToAutoManagedConnection(TransactionLegacy.CLOUD_DB); if (logger.isTraceEnabled()) { - logger.trace("Cluster manager heartbeat update, id:" + _mshostId); + logger.trace("Cluster manager heartbeat update, id: {}, mshost: {}", _mshostId, _mshost); } _mshostDao.update(_mshostId, _runId, DateUtil.currentGMTTime()); @@ -603,7 +604,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C 
profilerPeerScan.start(); if (logger.isTraceEnabled()) { - logger.trace("Cluster manager peer-scan, id:" + _mshostId); + logger.trace("Cluster manager peer-scan, id: {}, mshost: {}", _mshostId, _mshost); } if (!_peerScanInited) { @@ -811,8 +812,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C if (logger.isInfoEnabled()) { logger.info("Found " + inactiveList.size() + " inactive management server node based on timestamp"); for (final ManagementServerHostVO host : inactiveList) { - logger.info("management server node msid: " + host.getMsid() + ", name: " + host.getName() + ", service ip: " + host.getServiceIP() + - ", version: " + host.getVersion()); + logger.info("management server node ms: {}, service ip: {}, version: {}", host, host.getServiceIP(), host.getVersion()); } } @@ -820,7 +820,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C for (final ManagementServerHostVO host : inactiveList) { // Check if peer state is Up in the period if (!_mshostPeerDao.isPeerUpState(_mshostId, host.getId(), new Date(cutTime.getTime() - HeartbeatThreshold.value()))) { - logger.warn("Management node " + host.getId() + " is detected inactive by timestamp and did not send node status to this node"); + logger.warn("Management node {} is detected inactive by timestamp and did not send node status to this node", host); downHostList.add(host); } } @@ -865,7 +865,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C if (current == null) { if (entry.getKey().longValue() != _mshostId.longValue()) { if (logger.isDebugEnabled()) { - logger.debug("Detected management node left, id:" + entry.getKey() + ", nodeIP:" + entry.getValue().getServiceIP()); + logger.debug("Detected management node left {}, nodeIP:{}", + entry.getValue(), entry.getValue().getServiceIP()); } removedNodeList.add(entry.getValue()); } @@ -873,15 +874,16 @@ public class ClusterManagerImpl extends ManagerBase implements 
ClusterManager, C if (current.getRunid() == 0) { if (entry.getKey().longValue() != _mshostId.longValue()) { if (logger.isDebugEnabled()) { - logger.debug("Detected management node left because of invalidated session, id:" + entry.getKey() + ", nodeIP:" + - entry.getValue().getServiceIP()); + logger.debug("Detected management node left because of invalidated session {}, nodeIP:{}", + entry.getValue(), entry.getValue().getServiceIP()); } invalidatedNodeList.add(entry.getValue()); } } else { if (entry.getValue().getRunid() != current.getRunid()) { if (logger.isDebugEnabled()) { - logger.debug("Detected management node left and rejoined quickly, id:" + entry.getKey() + ", nodeIP:" + entry.getValue().getServiceIP()); + logger.debug("Detected management node left and rejoined quickly {}, nodeIP:{}", + entry.getValue(), entry.getValue().getServiceIP()); } entry.getValue().setRunid(current.getRunid()); @@ -954,7 +956,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C final ManagementServerHostVO mshost = it.next(); // Check if peer state is Up in the period if (!_mshostPeerDao.isPeerUpState(_mshostId, mshost.getId(), new Date(cutTime.getTime() - HeartbeatThreshold.value()))) { - logger.warn("Management node " + mshost.getId() + " is detected inactive by timestamp and did not send node status to this node"); + logger.warn("Management node {} is detected inactive by timestamp and did not send node status to this node", mshost); _activePeers.remove(mshost.getId()); try { JmxUtil.unregisterMBean("ClusterManager", "Node " + mshost.getId()); @@ -962,7 +964,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C logger.warn("Unable to deregiester cluster node from JMX monitoring due to exception " + e.toString()); } } else { - logger.info("Management node " + mshost.getId() + " is detected inactive by timestamp but sent node status to this node"); + logger.info("Management node {} is detected inactive by timestamp 
but sent node status to this node", mshost); it.remove(); } } @@ -979,7 +981,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C _activePeers.put(mshost.getId(), mshost); if (logger.isDebugEnabled()) { - logger.debug("Detected management node joined, id:" + mshost.getId() + ", nodeIP:" + mshost.getServiceIP()); + logger.debug("Detected management node joined, {}, nodeIP:{}", mshost, mshost.getServiceIP()); } newNodeList.add(mshost); @@ -1032,7 +1034,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C @DB public boolean start() { if (logger.isInfoEnabled()) { - logger.info("Starting Cluster manager, msid : " + _msId); + logger.info("Starting Cluster manager, msid: {}, mshost: {}", _msId, _mshost); } final ManagementServerHostVO mshost = Transaction.execute(new TransactionCallback() { @@ -1058,13 +1060,13 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C mshost.setUuid(UUID.randomUUID().toString()); _mshostDao.persist(mshost); if (logger.isInfoEnabled()) { - logger.info("New instance of management server msid " + _msId + ", runId " + _runId + " is being started"); + logger.info("New instance of management server {}, runId {} is being started", mshost, _runId); } } else { _mshostDao.update(mshost.getId(), _runId, NetUtils.getCanonicalHostName(), version, _clusterNodeIP, _currentServiceAdapter.getServicePort(), DateUtil.currentGMTTime()); if (logger.isInfoEnabled()) { - logger.info("Management server " + _msId + ", runId " + _runId + " is being started"); + logger.info("Management server {}, runId {} is being started", mshost, _runId); } } @@ -1072,9 +1074,10 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C } }); + _mshost = mshost; _mshostId = mshost.getId(); if (logger.isInfoEnabled()) { - logger.info("Management server (host id : " + _mshostId + ") is being started at " + _clusterNodeIP + ":" + 
_currentServiceAdapter.getServicePort()); + logger.info("Management server (host : {}) is being started at {}:{}", _mshost, _clusterNodeIP, _currentServiceAdapter.getServicePort()); } _mshostPeerDao.clearPeerInfo(_mshostId); @@ -1094,7 +1097,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C @DB public boolean stop() { if (logger.isInfoEnabled()) { - logger.info("Stopping Cluster manager, msid : " + _msId); + logger.info("Stopping Cluster manager, msid : {}, runId : {}, host : {}", _msId, _runId, _mshost); } if (_mshostId != null) { diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ManagementServerHostVO.java b/framework/cluster/src/main/java/com/cloud/cluster/ManagementServerHostVO.java index 2918ccd22d7..6c3b2a93994 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ManagementServerHostVO.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/ManagementServerHostVO.java @@ -32,6 +32,7 @@ import javax.persistence.TemporalType; import org.apache.cloudstack.management.ManagementServerHost; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "mshost") @@ -199,7 +200,9 @@ public class ManagementServerHostVO implements ManagementServerHost { @Override public String toString() { - return new StringBuilder("ManagementServer[").append("-").append(id).append("-").append(msid).append("-").append(state).append("]").toString(); + return String.format("ManagementServer %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "msid")); } @Override diff --git a/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java b/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java index 82fea9749ff..c7f2daadc51 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java +++ b/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java 
@@ -1051,6 +1051,10 @@ public abstract class GenericDaoBase extends Compone } protected T findById(ID id, boolean removed, Boolean lock) { + if (id == null) { + return null; + } + StringBuilder sql = new StringBuilder(_selectByIdSql); if (!removed && _removed != null) { sql.append(" AND ").append(_removed.first()); diff --git a/framework/events/src/main/java/org/apache/cloudstack/framework/events/Event.java b/framework/events/src/main/java/org/apache/cloudstack/framework/events/Event.java index 7a14f385fa1..c293de8b4dd 100644 --- a/framework/events/src/main/java/org/apache/cloudstack/framework/events/Event.java +++ b/framework/events/src/main/java/org/apache/cloudstack/framework/events/Event.java @@ -21,6 +21,7 @@ package org.apache.cloudstack.framework.events; import com.google.gson.Gson; import com.google.gson.annotations.Expose; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class Event { @@ -49,6 +50,13 @@ public class Event { setResourceUUID(resourceUUID); } + @Override + public String toString() { + return String.format("Event %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "eventId", "eventUuid", "eventType", "resourceType", "resourceUUID", "description")); + } + public Long getEventId() { return eventId; } diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobVO.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobVO.java index 6b85ae27f58..0f2c8d1736a 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobVO.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobVO.java @@ -40,6 +40,7 @@ import org.apache.cloudstack.jobs.JobInfo; import com.cloud.utils.UuidUtils; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "async_job") 
@@ -384,26 +385,10 @@ public class AsyncJobVO implements AsyncJob, JobInfo { @Override public String toString() { - StringBuffer sb = new StringBuffer(); - sb.append("AsyncJobVO: {id:").append(getId()); - sb.append(", userId: ").append(getUserId()); - sb.append(", accountId: ").append(getAccountId()); - sb.append(", instanceType: ").append(getInstanceType()); - sb.append(", instanceId: ").append(getInstanceId()); - sb.append(", cmd: ").append(getCmd()); - sb.append(", cmdInfo: ").append(getCmdInfo()); - sb.append(", cmdVersion: ").append(getCmdVersion()); - sb.append(", status: ").append(getStatus()); - sb.append(", processStatus: ").append(getProcessStatus()); - sb.append(", resultCode: ").append(getResultCode()); - sb.append(", result: ").append(getResult()); - sb.append(", initMsid: ").append(getInitMsid()); - sb.append(", completeMsid: ").append(getCompleteMsid()); - sb.append(", lastUpdated: ").append(getLastUpdated()); - sb.append(", lastPolled: ").append(getLastPolled()); - sb.append(", created: ").append(getCreated()); - sb.append(", removed: ").append(getRemoved()); - sb.append("}"); - return sb.toString(); + return String.format("AsyncJob %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", + "userId", "accountId", "instanceType", "instanceId", "cmd", "cmdInfo", + "cmdVersion", "status", "processStatus", "resultCode", "result", "initMsid", + "completeMsid", "lastUpdated", "lastPolled", "created", "removed")); } } diff --git a/packaging/el8/cloud.spec b/packaging/el8/cloud.spec index eb03cfe0df4..e34778820cb 100644 --- a/packaging/el8/cloud.spec +++ b/packaging/el8/cloud.spec @@ -118,6 +118,7 @@ Requires: cryptsetup Requires: rng-tools Requires: (libgcrypt > 1.8.3 or libgcrypt20) Requires: (selinux-tools if qemu-tools) +Requires: sysstat Provides: cloud-agent Group: System Environment/Libraries %description agent diff --git 
a/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java b/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java index ec6674477b0..0ed658aa70d 100644 --- a/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java +++ b/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java @@ -95,7 +95,7 @@ public class ExplicitDedicationProcessor extends AffinityProcessorBase implement for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) { if (vmGroupMapping != null) { if (logger.isDebugEnabled()) { - logger.debug("Processing affinity group " + vmGroupMapping.getAffinityGroupId() + "of type 'ExplicitDedication' for VM Id: " + vm.getId()); + logger.debug("Processing affinity group {} of type 'ExplicitDedication' for VM: {}", _affinityGroupDao.findById(vmGroupMapping.getAffinityGroupId()), vm); } long affinityGroupId = vmGroupMapping.getAffinityGroupId(); @@ -304,7 +304,7 @@ public class ExplicitDedicationProcessor extends AffinityProcessorBase implement DedicatedResourceVO dPod = _dedicatedDao.findByPodId(pod.getId()); if (dPod != null && !dedicatedResources.contains(dPod)) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Avoiding POD %s [%s] because it is not dedicated.", pod.getName(), pod.getUuid())); + logger.debug(String.format("Avoiding POD %s because it is not dedicated.", pod)); } avoidList.addPod(pod.getId()); } else { @@ -345,7 +345,7 @@ public class ExplicitDedicationProcessor extends AffinityProcessorBase implement for (HostPodVO pod : pods) { if (podsInIncludeList != null && !podsInIncludeList.contains(pod.getId())) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Avoiding POD %s [%s], as it is not in include list.", pod.getName(), 
pod.getUuid())); + logger.debug(String.format("Avoiding POD %s, as it is not in include list.", pod)); } avoidList.addPod(pod.getId()); } diff --git a/plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java b/plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java index b94cf49e4d9..b97b8e224ad 100644 --- a/plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java +++ b/plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java @@ -80,7 +80,7 @@ public class HostAffinityProcessor extends AffinityProcessorBase implements Affi */ protected void processAffinityGroup(AffinityGroupVMMapVO vmGroupMapping, DeploymentPlan plan, VirtualMachine vm, List vmList) { AffinityGroupVO group = _affinityGroupDao.findById(vmGroupMapping.getAffinityGroupId()); - logger.debug("Processing affinity group " + group.getName() + " for VM Id: " + vm.getId()); + logger.debug("Processing affinity group {} for VM {}", group, vm); List groupVMIds = _affinityGroupVMMapDao.listVmIdsByAffinityGroup(group.getId()); groupVMIds.remove(vm.getId()); diff --git a/plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java b/plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java index 4681ce4321e..bd29a48f258 100644 --- a/plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java +++ b/plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java @@ -94,7 +94,7 @@ public class HostAntiAffinityProcessor extends AffinityProcessorBase implements AffinityGroupVO group = 
_affinityGroupDao.findById(vmGroupMapping.getAffinityGroupId()); if (logger.isDebugEnabled()) { - logger.debug("Processing affinity group " + group.getName() + " for VM Id: " + vm.getId()); + logger.debug(String.format("Processing affinity group %s for VM %s", group, vm)); } List groupVMIds = _affinityGroupVMMapDao.listVmIdsByAffinityGroup(group.getId()); @@ -106,7 +106,7 @@ public class HostAntiAffinityProcessor extends AffinityProcessorBase implements if (groupVM.getHostId() != null) { avoid.addHost(groupVM.getHostId()); if (logger.isDebugEnabled()) { - logger.debug("Added host " + groupVM.getHostId() + " to avoid set, since VM " + groupVM.getId() + " is present on the host"); + logger.debug("Added host {} to avoid set, since VM {} is present on the host", groupVM.getHostId(), groupVM); } } } else if (Arrays.asList(VirtualMachine.State.Starting, VirtualMachine.State.Stopped).contains(groupVM.getState()) && groupVM.getLastHostId() != null) { @@ -114,8 +114,7 @@ public class HostAntiAffinityProcessor extends AffinityProcessorBase implements if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) { avoid.addHost(groupVM.getLastHostId()); if (logger.isDebugEnabled()) { - logger.debug("Added host " + groupVM.getLastHostId() + " to avoid set, since VM " + groupVM.getId() + - " is present on the host, in Stopped state but has reserved capacity"); + logger.debug("Added host {} to avoid set, since VM {} is present on the host, in Stopped state but has reserved capacity", groupVM.getLastHostId(), groupVM); } } } @@ -155,8 +154,7 @@ public class HostAntiAffinityProcessor extends AffinityProcessorBase implements VMReservationVO vmReservation = _reservationDao.findByVmId(groupVMId); if (vmReservation != null && vmReservation.getHostId() != null && vmReservation.getHostId().equals(plannedHostId)) { if (logger.isDebugEnabled()) { - logger.debug("Planned destination for VM " + vm.getId() + " conflicts with an existing VM " + vmReservation.getVmId() + - " reserved on the same 
host " + plannedHostId); + logger.debug(String.format("Planned destination for VM %s conflicts with an existing VM %d reserved on the same host %s", vm, vmReservation.getVmId(), plannedDestination.getHost())); } return false; } diff --git a/plugins/affinity-group-processors/non-strict-host-affinity/src/main/java/org/apache/cloudstack/affinity/NonStrictHostAffinityProcessor.java b/plugins/affinity-group-processors/non-strict-host-affinity/src/main/java/org/apache/cloudstack/affinity/NonStrictHostAffinityProcessor.java index f227a3ffc8d..49e3f60ed5d 100644 --- a/plugins/affinity-group-processors/non-strict-host-affinity/src/main/java/org/apache/cloudstack/affinity/NonStrictHostAffinityProcessor.java +++ b/plugins/affinity-group-processors/non-strict-host-affinity/src/main/java/org/apache/cloudstack/affinity/NonStrictHostAffinityProcessor.java @@ -77,7 +77,7 @@ public class NonStrictHostAffinityProcessor extends AffinityProcessorBase implem AffinityGroupVO group = affinityGroupDao.findById(vmGroupMapping.getAffinityGroupId()); if (logger.isDebugEnabled()) { - logger.debug("Processing affinity group " + group.getName() + " for VM Id: " + vm.getId()); + logger.debug(String.format("Processing affinity group %s for VM: %s", group, vm)); } List groupVMIds = affinityGroupVMMapDao.listVmIdsByAffinityGroup(group.getId()); @@ -95,17 +95,17 @@ public class NonStrictHostAffinityProcessor extends AffinityProcessorBase implem if (groupVM.getHostId() != null) { Integer priority = adjustHostPriority(plan, groupVM.getHostId()); if (logger.isDebugEnabled()) { - logger.debug(String.format("Updated host %s priority to %s , since VM %s is present on the host", - groupVM.getHostId(), priority, groupVM.getId())); + logger.debug(String.format("Updated host %s priority to %s, since VM %s is present on the host", + groupVM.getHostId(), priority, groupVM)); } } else if (Arrays.asList(VirtualMachine.State.Starting, VirtualMachine.State.Stopped).contains(groupVM.getState()) && 
groupVM.getLastHostId() != null) { long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - groupVM.getUpdateTime().getTime()) / 1000; if (secondsSinceLastUpdate < vmCapacityReleaseInterval) { Integer priority = adjustHostPriority(plan, groupVM.getLastHostId()); if (logger.isDebugEnabled()) { - logger.debug(String.format("Updated host %s priority to %s , since VM %s" + + logger.debug(String.format("Updated host %s priority to %s, since VM %s" + " is present on the host, in %s state but has reserved capacity", - groupVM.getLastHostId(), priority, groupVM.getId(), groupVM.getState())); + groupVM.getLastHostId(), priority, groupVM, groupVM.getState())); } } } diff --git a/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/sioc/SiocManagerImpl.java b/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/sioc/SiocManagerImpl.java index c87ff3dfc82..e93b8df39e9 100644 --- a/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/sioc/SiocManagerImpl.java +++ b/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/sioc/SiocManagerImpl.java @@ -98,11 +98,11 @@ public class SiocManagerImpl implements SiocManager { } if (storagePool.getDataCenterId() != zoneId) { - throw new Exception("Error: Storage pool '" + storagePool.getName() + "' is not in zone ID " + zoneId + "."); + throw new Exception(String.format("Error: Storage pool %s is not in zone %s.", storagePool, zone)); } if (!storagePool.getPoolType().equals(StoragePoolType.VMFS)) { - throw new Exception("Error: Storage pool '" + storagePool.getName() + "' does not represent a VMFS datastore."); + throw new Exception(String.format("Error: Storage pool %s does not represent a VMFS datastore.", storagePool)); } String lockName = zone.getUuid() + "-" + storagePool.getUuid(); @@ -193,7 +193,7 @@ public class SiocManagerImpl implements SiocManager { ManagedObjectReference morVm = nameToVm.get(vmName); if (morVm == null) { - String errMsg = "Error: The VM with ID " + instanceId + " could 
not be located (ManagedObjectReference)."; + String errMsg = String.format("Error: The VM %s could not be located (ManagedObjectReference).", vmInstance); throw new Exception(errMsg); } @@ -336,7 +336,7 @@ public class SiocManagerImpl implements SiocManager { } private String getInfoMsg(Volume volume, Integer newShares, Long newLimitIops) { - String msgPrefix = "VMware SIOC: Volume = " + volume.getName(); + String msgPrefix = String.format("VMware SIOC: Volume %s", volume); String msgNewShares = newShares != null ? "; New Shares = " + newShares : ""; @@ -354,8 +354,7 @@ public class SiocManagerImpl implements SiocManager { List volumes = volumeDao.findByInstance(vmInstance.getId()); if (volumes == null || volumes.size() == 0) { - String errMsg = "Error: The VMware virtual disk '" + disk + "' could not be mapped to a CloudStack volume. " + - "There were no volumes for the VM with the following ID: " + vmInstance.getId() + "."; + String errMsg = String.format("Error: The VMware virtual disk '%s' could not be mapped to a CloudStack volume. 
There were no volumes for the VM: %s.", disk, vmInstance); throw new Exception(errMsg); } diff --git a/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java b/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java index f162c51a703..d4b3cff0f5c 100644 --- a/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java +++ b/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java @@ -67,20 +67,20 @@ public class DummyBackupProvider extends AdapterBase implements BackupProvider { @Override public boolean assignVMToBackupOffering(VirtualMachine vm, BackupOffering backupOffering) { - logger.debug("Creating VM backup for VM " + vm.getInstanceName() + " from backup offering " + backupOffering.getName()); + logger.debug("Creating VM backup for VM {} from backup offering {}", vm, backupOffering); ((VMInstanceVO) vm).setBackupExternalId("dummy-external-backup-id"); return true; } @Override public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) { - logger.debug("Restoring vm " + vm.getUuid() + "from backup " + backup.getUuid() + " on the Dummy Backup Provider"); + logger.debug("Restoring vm {} from backup {} on the Dummy Backup Provider", vm, backup); return true; } @Override public Pair restoreBackedUpVolume(Backup backup, String volumeUuid, String hostIp, String dataStoreUuid, Pair vmNameAndState) { - logger.debug("Restoring volume " + volumeUuid + "from backup " + backup.getUuid() + " on the Dummy Backup Provider"); + logger.debug("Restoring volume {} from backup {} on the Dummy Backup Provider", volumeUuid, backup); throw new CloudRuntimeException("Dummy plugin does not support this feature"); } @@ -101,7 +101,7 @@ public class DummyBackupProvider extends AdapterBase implements BackupProvider { @Override public boolean removeVMFromBackupOffering(VirtualMachine vm) { - logger.debug("Removing VM ID " + vm.getUuid() + " from backup 
offering by the Dummy Backup Provider"); + logger.debug(String.format("Removing VM %s from backup offering by the Dummy Backup Provider", vm)); return true; } @@ -112,7 +112,7 @@ public class DummyBackupProvider extends AdapterBase implements BackupProvider { @Override public boolean takeBackup(VirtualMachine vm) { - logger.debug("Starting backup for VM ID " + vm.getUuid() + " on Dummy provider"); + logger.debug(String.format("Starting backup for VM %s on Dummy provider", vm)); BackupVO backup = new BackupVO(); backup.setVmId(vm.getId()); diff --git a/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java b/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java index 4a6725abdca..5d3d1a91933 100644 --- a/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java +++ b/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java @@ -107,7 +107,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co // Try to find any Up host in the same cluster for (final Host hostInCluster : hostDao.findHypervisorHostInCluster(host.getClusterId())) { if (hostInCluster.getStatus() == Status.Up) { - LOG.debug("Found Host " + hostInCluster.getName()); + LOG.debug("Found Host {}", hostInCluster); return hostInCluster; } } @@ -115,7 +115,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co // Try to find any Host in the zone for (final HostVO hostInZone : hostDao.listByDataCenterIdAndHypervisorType(host.getDataCenterId(), Hypervisor.HypervisorType.KVM)) { if (hostInZone.getStatus() == Status.Up) { - LOG.debug("Found Host " + hostInZone.getName()); + LOG.debug("Found Host {}", hostInZone); return hostInZone; } } @@ -213,7 +213,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co List backedVolumes = backup.getBackedUpVolumes(); List volumes = backedVolumes.stream().map(volume -> 
volumeDao.findByUuid(volume.getUuid())).collect(Collectors.toList()); - LOG.debug("Restoring vm {} from backup {} on the NAS Backup Provider", vm.getUuid(), backup.getUuid()); + LOG.debug("Restoring vm {} from backup {} on the NAS Backup Provider", vm, backup); BackupRepository backupRepository = getBackupRepository(vm, backup); final Host host = getLastVMHypervisorHost(vm); @@ -263,7 +263,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co Optional matchingVolume = getBackedUpVolumeInfo(backupSourceVm.getBackupVolumeList(), volumeUuid); Long backedUpVolumeSize = matchingVolume.isPresent() ? matchingVolume.get().getSize() : 0L; - LOG.debug("Restoring vm volume" + volumeUuid + "from backup " + backup.getUuid() + " on the NAS Backup Provider"); + LOG.debug("Restoring vm volume {} from backup {} on the NAS Backup Provider", volume, backup); BackupRepository backupRepository = getBackupRepository(backupSourceVm, backup); VolumeVO restoredVolume = new VolumeVO(Volume.Type.DATADISK, null, backup.getZoneId(), @@ -377,8 +377,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co vmBackupProtectedSize += backup.getProtectedSize(); } Backup.Metric vmBackupMetric = new Backup.Metric(vmBackupSize,vmBackupProtectedSize); - LOG.debug(String.format("Metrics for VM [uuid: %s, name: %s] is [backup size: %s, data size: %s].", vm.getUuid(), - vm.getInstanceName(), vmBackupMetric.getBackupSize(), vmBackupMetric.getDataSize())); + LOG.debug("Metrics for VM {} is [backup size: {}, data size: {}].", vm, vmBackupMetric.getBackupSize(), vmBackupMetric.getDataSize()); metrics.put(vm, vmBackupMetric); } return metrics; diff --git a/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java b/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java index 0e87ad33887..393e2911ac3 100644 --- 
a/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java +++ b/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java @@ -173,7 +173,7 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid List altClusterHosts = hostDao.findHypervisorHostInCluster(host.getClusterId()); for (final HostVO candidateClusterHost : altClusterHosts) { if ( candidateClusterHost.getStatus() == Status.Up ) { - LOG.debug("Found Host " + candidateClusterHost.getName()); + LOG.debug(String.format("Found Host %s", candidateClusterHost)); return candidateClusterHost; } } @@ -182,7 +182,7 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid List altZoneHosts = hostDao.findByDataCenterId(host.getDataCenterId()); for (final HostVO candidateZoneHost : altZoneHosts) { if ( candidateZoneHost.getStatus() == Status.Up && candidateZoneHost.getHypervisorType() == Hypervisor.HypervisorType.KVM ) { - LOG.debug("Found Host " + candidateZoneHost.getName()); + LOG.debug("Found Host " + candidateZoneHost); return candidateZoneHost; } } @@ -331,7 +331,7 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid final NetworkerBackup networkerBackup=getClient(zoneId).getNetworkerBackupInfo(externalBackupId); final String SSID = networkerBackup.getShortId(); - LOG.debug("Restoring vm " + vm.getUuid() + "from backup " + backup.getUuid() + " on the Networker Backup Provider"); + LOG.debug(String.format("Restoring vm %s from backup %s on the Networker Backup Provider", vm, backup)); if ( SSID.isEmpty() ) { LOG.debug("There was an error retrieving the SSID for backup with id " + externalBackupId + " from EMC NEtworker"); @@ -359,7 +359,7 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid script.add("-v"); Date restoreJobStart = new Date(); - LOG.debug("Starting Restore for VM ID " + vm.getUuid() + " and SSID" + SSID + " 
at " + restoreJobStart); + LOG.debug(String.format("Starting Restore for VM %s and %s at %s", vm, SSID, restoreJobStart)); if ( executeRestoreCommand(hostVO, credentials.first(), credentials.second(), script.toString()) ) { Date restoreJobEnd = new Date(); @@ -387,7 +387,7 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid final String destinationNetworkerClient = hostVO.getName().split("\\.")[0]; Long restoredVolumeDiskSize = 0L; - LOG.debug("Restoring volume " + volumeUuid + "from backup " + backup.getUuid() + " on the Networker Backup Provider"); + LOG.debug(String.format("Restoring volume %s with uuid %s from backup %s on the Networker Backup Provider", volume, volumeUuid, backup)); if ( SSID.isEmpty() ) { LOG.debug("There was an error retrieving the SSID for backup with id " + externalBackupId + " from EMC NEtworker"); @@ -448,7 +448,7 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid script.add("-v"); Date restoreJobStart = new Date(); - LOG.debug("Starting Restore for Volume UUID " + volume.getUuid() + " and SSID" + SSID + " at " + restoreJobStart); + LOG.debug(String.format("Starting Restore for Volume UUID %s and SSID %s at %s", volume, SSID, restoreJobStart)); if ( executeRestoreCommand(hostVO, credentials.first(), credentials.second(), script.toString()) ) { Date restoreJobEnd = new Date(); @@ -505,18 +505,18 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid if ( Boolean.TRUE.equals(NetworkerClientVerboseLogs.value()) ) script.add("-v"); - LOG.debug("Starting backup for VM ID " + vm.getUuid() + " on Networker provider"); + LOG.debug("Starting backup for VM {} on Networker provider", vm); Date backupJobStart = new Date(); String saveTime = executeBackupCommand(hostVO, credentials.first(), credentials.second(), script.toString()); - LOG.info ("EMC Networker finished backup job for vm " + vm.getName() + " with saveset Time: " + saveTime); + LOG.info("EMC 
Networker finished backup job for vm {} with saveset Time: {}", vm, saveTime); BackupVO backup = getClient(vm.getDataCenterId()).registerBackupForVm(vm, backupJobStart, saveTime); if (backup != null) { backup.setBackedUpVolumes(BackupManagerImpl.createVolumeInfoFromVolumes(volumeDao.findByInstance(vm.getId()))); backupDao.persist(backup); return true; } else { - LOG.error("Could not register backup for vm " + vm.getName() + " with saveset Time: " + saveTime); + LOG.error("Could not register backup for vm {} with saveset Time: {}", vm, saveTime); // We need to handle this rare situation where backup is successful but can't be registered properly. return false; } @@ -558,8 +558,7 @@ public class NetworkerBackupProvid vmBackupProtectedSize+= vmNwBackup.getSize().getValue() / 1024L; } Backup.Metric vmBackupMetric = new Backup.Metric(vmBackupSize,vmBackupProtectedSize); - LOG.debug(String.format("Metrics for VM [uuid: %s, name: %s] is [backup size: %s, data size: %s].", vm.getUuid(), - vm.getInstanceName(), vmBackupMetric.getBackupSize(), vmBackupMetric.getDataSize())); + LOG.debug(String.format("Metrics for VM [%s] is [backup size: %s, data size: %s].", vm, vmBackupMetric.getBackupSize(), vmBackupMetric.getDataSize())); metrics.put(vm, vmBackupMetric); } return metrics; @@ -578,14 +577,14 @@ public class NetworkerBackupProvid Long vmBackupSize=0L; boolean backupExists = false; for (final Backup backupInDb : backupsInDb) { - LOG.debug("Checking if Backup with external ID " + backupInDb.getName() + " for VM " + backupInDb.getVmId() + "is valid"); + LOG.debug(String.format("Checking if Backup %s with external ID %s for VM %s is valid", backupInDb, backupInDb.getName(), vm)); if ( networkerBackupId.equals(backupInDb.getExternalId()) ) { - LOG.debug("Found Backup with id " + backupInDb.getId() + " in both Database and Networker"); + LOG.debug(String.format("Found Backup %s in both 
Database and Networker", backupInDb)); backupExists = true; removeList.remove(backupInDb.getId()); if (metric != null) { - LOG.debug(String.format("Update backup with [uuid: %s, external id: %s] from [size: %s, protected size: %s] to [size: %s, protected size: %s].", - backupInDb.getUuid(), backupInDb.getExternalId(), backupInDb.getSize(), backupInDb.getProtectedSize(), + LOG.debug(String.format("Update backup [%s] from [size: %s, protected size: %s] to [size: %s, protected size: %s].", + backupInDb, backupInDb.getSize(), backupInDb.getProtectedSize(), metric.getBackupSize(), metric.getDataSize())); ((BackupVO) backupInDb).setSize(metric.getBackupSize()); ((BackupVO) backupInDb).setProtectedSize(metric.getDataSize()); @@ -627,12 +626,12 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid strayBackup.setAccountId(vm.getAccountId()); strayBackup.setDomainId(vm.getDomainId()); strayBackup.setZoneId(vm.getDataCenterId()); - LOG.debug(String.format("Creating a new entry in backups: [uuid: %s, vm_id: %s, external_id: %s, type: %s, date: %s, backup_offering_id: %s, account_id: %s, " - + "domain_id: %s, zone_id: %s].", strayBackup.getUuid(), strayBackup.getVmId(), strayBackup.getExternalId(), + LOG.debug(String.format("Creating a new entry in backups: [id: %s, uuid: %s, vm_id: %s, external_id: %s, type: %s, date: %s, backup_offering_id: %s, account_id: %s, " + + "domain_id: %s, zone_id: %s].", strayBackup.getId(), strayBackup.getUuid(), strayBackup.getVmId(), strayBackup.getExternalId(), strayBackup.getType(), strayBackup.getDate(), strayBackup.getBackupOfferingId(), strayBackup.getAccountId(), strayBackup.getDomainId(), strayBackup.getZoneId())); backupDao.persist(strayBackup); - LOG.warn("Added backup found in provider with ID: [" + strayBackup.getId() + "]"); + LOG.warn("Added backup found in provider [" + strayBackup + "]"); } else { LOG.debug ("Backup is in progress, skipping addition for this run"); } diff --git 
a/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/networker/NetworkerClient.java b/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/networker/NetworkerClient.java index 8aecaa26023..36bfd456475 100644 --- a/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/networker/NetworkerClient.java +++ b/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/networker/NetworkerClient.java @@ -298,7 +298,7 @@ public class NetworkerClient { public ArrayList getBackupsForVm(VirtualMachine vm) { SimpleDateFormat formatterDateTime = new SimpleDateFormat("yyy-MM-dd'T'HH:mm:ss"); - LOG.debug("Trying to list EMC Networker backups for VM " + vm.getName()); + LOG.debug(String.format("Trying to list EMC Networker backups for VM %s", vm)); try { final HttpResponse response = get("/global/backups/?q=name:" + vm.getName()); checkResponseOK(response); @@ -310,7 +310,7 @@ public class NetworkerClient { return backupsTaken; } for (final NetworkerBackup backup : networkerBackups.getBackups()) { - LOG.debug("Found Backup " + backup.getId()); + LOG.debug(String.format("Found Backup %s", backup)); // Backups that have expired on the EMC Networker but not removed yet will not be added try { Date backupRetentionTime = formatterDateTime.parse(backup.getRetentionTime()); @@ -345,7 +345,7 @@ public class NetworkerClient { return policies; } for (final ProtectionPolicy protectionPolicy : protectionPolicies.getProtectionPolicies()) { - LOG.debug("Found Protection Policy:" + protectionPolicy.getName()); + LOG.debug(String.format("Found Protection Policy: %s", protectionPolicy)); policies.add(new NetworkerBackupOffering(protectionPolicy.getName(), protectionPolicy.getResourceId().getId())); } return policies; diff --git a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java index 4750e3264aa..c120d8bd599 100644 
--- a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java +++ b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java @@ -173,7 +173,7 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, final String clonedJobName = getGuestBackupName(vm.getInstanceName(), vm.getUuid()); if (!client.cloneVeeamJob(parentJob, clonedJobName)) { - logger.error("Failed to clone pre-defined Veeam job (backup offering) for backup offering ID: " + backupOffering.getExternalId() + " but will check the list of jobs again if it was eventually succeeded."); + logger.error("Failed to clone pre-defined Veeam job (backup offering) for backup offering [id: {}, name: {}] but will check the list of jobs again if it was eventually succeeded.", backupOffering.getExternalId(), backupOffering.getName()); } for (final BackupOffering job : client.listJobs()) { @@ -182,7 +182,7 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, if (BooleanUtils.isTrue(clonedJob.getScheduleConfigured()) && !clonedJob.getScheduleEnabled()) { client.toggleJobSchedule(clonedJob.getId()); } - logger.debug("Veeam job (backup offering) for backup offering ID: " + backupOffering.getExternalId() + " found, now trying to assign the VM to the job."); + logger.debug("Veeam job (backup offering) for backup offering [id: {}, name: {}] found, now trying to assign the VM to the job.", backupOffering.getExternalId(), backupOffering.getName()); final VmwareDatacenter vmwareDC = findVmwareDatacenterForVM(vm); if (client.addVMToVeeamJob(job.getExternalId(), vm.getInstanceName(), vmwareDC.getVcenterHost())) { ((VMInstanceVO) vm).setBackupExternalId(job.getExternalId()); @@ -229,7 +229,7 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, public boolean deleteBackup(Backup backup, boolean forced) { VMInstanceVO vm = vmInstanceDao.findByIdIncludingRemoved(backup.getVmId()); if (vm 
== null) { - throw new CloudRuntimeException(String.format("Could not find any VM associated with the Backup [uuid: %s, externalId: %s].", backup.getUuid(), backup.getExternalId())); + throw new CloudRuntimeException(String.format("Could not find any VM associated with the Backup [uuid: %s, name: %s, externalId: %s].", backup.getUuid(), backup.getName(), backup.getExternalId())); } if (!forced) { logger.debug(String.format("Veeam backup provider does not have a safe way to remove a single restore point, which results in all backup chain being removed. " @@ -315,8 +315,8 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, } Metric metric = backendMetrics.get(vm.getUuid()); - logger.debug(String.format("Metrics for VM [uuid: %s, name: %s] is [backup size: %s, data size: %s].", vm.getUuid(), - vm.getInstanceName(), metric.getBackupSize(), metric.getDataSize())); + logger.debug("Metrics for VM [{}] is [backup size: {}, data size: {}].", vm, + metric.getBackupSize(), metric.getDataSize()); metrics.put(vm, metric); } return metrics; @@ -331,8 +331,8 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, for (final Backup backup : backupsInDb) { if (restorePoint.getId().equals(backup.getExternalId())) { if (metric != null) { - logger.debug(String.format("Update backup with [uuid: %s, external id: %s] from [size: %s, protected size: %s] to [size: %s, protected size: %s].", - backup.getUuid(), backup.getExternalId(), backup.getSize(), backup.getProtectedSize(), metric.getBackupSize(), metric.getDataSize())); + logger.debug("Update backup with [id: {}, uuid: {}, name: {}, external id: {}] from [size: {}, protected size: {}] to [size: {}, protected size: {}].", + backup.getId(), backup.getUuid(), backup.getName(), backup.getExternalId(), backup.getSize(), backup.getProtectedSize(), metric.getBackupSize(), metric.getDataSize()); ((BackupVO) backup).setSize(metric.getBackupSize()); ((BackupVO) 
backup).setProtectedSize(metric.getDataSize()); @@ -348,7 +348,7 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, public void syncBackups(VirtualMachine vm, Backup.Metric metric) { List restorePoints = listRestorePoints(vm); if (CollectionUtils.isEmpty(restorePoints)) { - logger.debug(String.format("Can't find any restore point to VM: [uuid: %s, name: %s].", vm.getUuid(), vm.getInstanceName())); + logger.debug("Can't find any restore point to VM: {}", vm); return; } Transaction.execute(new TransactionCallbackNoReturn() { @@ -379,9 +379,8 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, backup.setDomainId(vm.getDomainId()); backup.setZoneId(vm.getDataCenterId()); - logger.debug(String.format("Creating a new entry in backups: [uuid: %s, vm_id: %s, external_id: %s, type: %s, date: %s, backup_offering_id: %s, account_id: %s, " - + "domain_id: %s, zone_id: %s].", backup.getUuid(), backup.getVmId(), backup.getExternalId(), backup.getType(), backup.getDate(), - backup.getBackupOfferingId(), backup.getAccountId(), backup.getDomainId(), backup.getZoneId())); + logger.debug("Creating a new entry in backups: [id: {}, uuid: {}, name: {}, vm_id: {}, external_id: {}, type: {}, date: {}, backup_offering_id: {}, account_id: {}, " + + "domain_id: {}, zone_id: {}].", backup.getId(), backup.getUuid(), backup.getName(), backup.getVmId(), backup.getExternalId(), backup.getType(), backup.getDate(), backup.getBackupOfferingId(), backup.getAccountId(), backup.getDomainId(), backup.getZoneId()); backupDao.persist(backup); ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, vm.getAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_VM_BACKUP_CREATE, diff --git a/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/VeeamBackupProviderTest.java b/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/VeeamBackupProviderTest.java index 1f2de8f3196..cbfe2fda592 100644 --- 
a/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/VeeamBackupProviderTest.java +++ b/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/VeeamBackupProviderTest.java @@ -61,7 +61,7 @@ public class VeeamBackupProviderTest { backupProvider.deleteBackup(backup, false); } catch (Exception e) { assertEquals(CloudRuntimeException.class, e.getClass()); - String expected = String.format("Could not find any VM associated with the Backup [uuid: %s, externalId: %s].", backup.getUuid(), "abc"); + String expected = String.format("Could not find any VM associated with the Backup [uuid: %s, name: null, externalId: %s].", backup.getUuid(), "abc"); assertEquals(expected , e.getMessage()); } } diff --git a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java index 4f1db396b7c..cd7dc2bbbad 100644 --- a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java +++ b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java @@ -142,7 +142,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { DedicatedResourceVO dedicatedZone = _dedicatedDao.findByZoneId(zoneId); //check if zone is dedicated if (dedicatedZone != null) { - logger.error("Zone " + dc.getName() + " is already dedicated"); + logger.error(String.format("Zone %s is already dedicated", dc)); throw new CloudRuntimeException("Zone " + dc.getName() + " is already dedicated"); } @@ -161,7 +161,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { if (dPod.getAccountId().equals(accountId)) { podsToRelease.add(dPod); } else { - logger.error("Pod " + pod.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); + logger.error(String.format("Pod %s under this Zone %s 
is dedicated to different account/domain", pod, dc)); throw new CloudRuntimeException("Pod " + pod.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); } } else { @@ -187,7 +187,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { if (dCluster.getAccountId().equals(accountId)) { clustersToRelease.add(dCluster); } else { - logger.error("Cluster " + cluster.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); + logger.error(String.format("Cluster %s under this Zone %s is dedicated to different account/domain", cluster, dc)); throw new CloudRuntimeException("Cluster " + cluster.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); } @@ -214,7 +214,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { if (dHost.getAccountId().equals(accountId)) { hostsToRelease.add(dHost); } else { - logger.error("Host " + host.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); + logger.error(String.format("Host %s under this Zone %s is dedicated to different account/domain", host, dc)); throw new CloudRuntimeException("Host " + host.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); } } else { @@ -292,7 +292,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { DedicatedResourceVO dedicatedZoneOfPod = _dedicatedDao.findByZoneId(pod.getDataCenterId()); //check if pod is dedicated if (dedicatedPod != null) { - logger.error("Pod " + pod.getName() + " is already dedicated"); + logger.error(String.format("Pod %s is already dedicated", pod)); throw new CloudRuntimeException("Pod " + pod.getName() + " is already dedicated"); } @@ -302,7 +302,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { if (dedicatedZoneOfPod.getAccountId() != null || (accountId == null && !domainIdInChildreanList) 
|| (accountId != null && !(dedicatedZoneOfPod.getDomainId().equals(domainId) || domainIdInChildreanList))) { DataCenterVO zone = _zoneDao.findById(pod.getDataCenterId()); - logger.error("Cannot dedicate Pod. Its zone is already dedicated"); + logger.error(String.format("Cannot dedicate Pod. Its zone %s is already dedicated", zone)); throw new CloudRuntimeException("Pod's Zone " + zone.getName() + " is already dedicated"); } } @@ -323,7 +323,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { if (dCluster.getAccountId().equals(accountId)) { clustersToRelease.add(dCluster); } else { - logger.error("Cluster " + cluster.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); + logger.error(String.format("Cluster %s under this Pod %s is dedicated to different account/domain", cluster, pod)); throw new CloudRuntimeException("Cluster " + cluster.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); } @@ -350,7 +350,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { if (dHost.getAccountId().equals(accountId)) { hostsToRelease.add(dHost); } else { - logger.error("Host " + host.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); + logger.error(String.format("Host %s under this Pod %s is dedicated to different account/domain", host, pod)); throw new CloudRuntimeException("Host " + host.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); } } else { @@ -421,7 +421,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { //check if cluster is dedicated if (dedicatedCluster != null) { - logger.error("Cluster " + cluster.getName() + " is already dedicated"); + logger.error(String.format("Cluster %s is already dedicated", cluster)); throw new CloudRuntimeException("Cluster " + cluster.getName() + " is already dedicated"); } @@ -430,8 +430,8 @@ 
public class DedicatedResourceManagerImpl implements DedicatedService { //can dedicate a cluster to an account/domain if pod is dedicated to parent-domain if (dedicatedPodOfCluster.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedPodOfCluster.getDomainId().equals(domainId) || domainIdInChildreanList))) { - logger.error("Cannot dedicate Cluster. Its Pod is already dedicated"); HostPodVO pod = _podDao.findById(cluster.getPodId()); + logger.error(String.format("Cannot dedicate Cluster %s. Its Pod %s is already dedicated", cluster, pod)); throw new CloudRuntimeException("Cluster's Pod " + pod.getName() + " is already dedicated"); } } @@ -441,8 +441,8 @@ public class DedicatedResourceManagerImpl implements DedicatedService { //can dedicate a cluster to an account/domain if zone is dedicated to parent-domain if (dedicatedZoneOfCluster.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedZoneOfCluster.getDomainId().equals(domainId) || domainIdInChildreanList))) { - logger.error("Cannot dedicate Cluster. Its zone is already dedicated"); DataCenterVO zone = _zoneDao.findById(cluster.getDataCenterId()); + logger.error(String.format("Cannot dedicate Cluster %s. 
Its zone %s is already dedicated", cluster, zone)); throw new CloudRuntimeException("Cluster's Zone " + zone.getName() + " is already dedicated"); } } @@ -463,7 +463,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { if (dHost.getAccountId().equals(accountId)) { hostsToRelease.add(dHost); } else { - logger.error("Cannot dedicate Cluster " + cluster.getName() + " to account" + accountName); + logger.error(String.format("Cannot dedicate Cluster %s to account %s", cluster, accountName)); throw new CloudRuntimeException("Cannot dedicate Cluster " + cluster.getName() + " to account" + accountName); } } else { @@ -536,7 +536,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { DedicatedResourceVO dedicatedZoneOfHost = _dedicatedDao.findByZoneId(host.getDataCenterId()); if (dedicatedHost != null) { - logger.error("Host " + host.getName() + " is already dedicated"); + logger.error(String.format("Host %s is already dedicated", host)); throw new CloudRuntimeException("Host " + host.getName() + " is already dedicated"); } @@ -546,7 +546,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { if (dedicatedClusterOfHost.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedClusterOfHost.getDomainId().equals(domainId) || domainIdInChildreanList))) { ClusterVO cluster = _clusterDao.findById(host.getClusterId()); - logger.error("Host's Cluster " + cluster.getName() + " is already dedicated"); + logger.error(String.format("Host's Cluster %s is already dedicated", cluster)); throw new CloudRuntimeException("Host's Cluster " + cluster.getName() + " is already dedicated"); } } @@ -557,7 +557,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { if (dedicatedPodOfHost.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedPodOfHost.getDomainId().equals(domainId) || 
domainIdInChildreanList))) { HostPodVO pod = _podDao.findById(host.getPodId()); - logger.error("Host's Pod " + pod.getName() + " is already dedicated"); + logger.error(String.format("Host's Pod %s is already dedicated", pod)); throw new CloudRuntimeException("Host's Pod " + pod.getName() + " is already dedicated"); } } @@ -568,7 +568,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { if (dedicatedZoneOfHost.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedZoneOfHost.getDomainId().equals(domainId) || domainIdInChildreanList))) { DataCenterVO zone = _zoneDao.findById(host.getDataCenterId()); - logger.error("Host's Data Center " + zone.getName() + " is already dedicated"); + logger.error(String.format("Host's Data Center %s is already dedicated", zone)); throw new CloudRuntimeException("Host's Data Center " + zone.getName() + " is already dedicated"); } } @@ -576,7 +576,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { List childDomainIds = getDomainChildIds(domainId); childDomainIds.add(domainId); - checkHostSuitabilityForExplicitDedication(accountId, childDomainIds, hostId); + checkHostSuitabilityForExplicitDedication(accountId, childDomainIds, host); final Long accountIdFinal = accountId; return Transaction.execute(new TransactionCallback>() { @@ -662,22 +662,22 @@ public class DedicatedResourceManagerImpl implements DedicatedService { return vms; } - private boolean checkHostSuitabilityForExplicitDedication(Long accountId, List domainIds, long hostId) { + private boolean checkHostSuitabilityForExplicitDedication(Long accountId, List domainIds, Host host) { boolean suitable = true; - List allVmsOnHost = getVmsOnHost(hostId); + List allVmsOnHost = getVmsOnHost(host.getId()); if (accountId != null) { for (UserVmVO vm : allVmsOnHost) { if (vm.getAccountId() != accountId) { - logger.info("Host " + vm.getHostId() + " found to be unsuitable for explicit 
dedication as it is " + "running instances of another account"); - throw new CloudRuntimeException("Host " + hostId + " found to be unsuitable for explicit dedication as it is " + + logger.info(String.format("Host %s found to be unsuitable for explicit dedication as it is running instances of another account", host)); + throw new CloudRuntimeException("Host " + host.getUuid() + " found to be unsuitable for explicit dedication as it is " + "running instances of another account"); } } } else { for (UserVmVO vm : allVmsOnHost) { if (!domainIds.contains(vm.getDomainId())) { - logger.info("Host " + vm.getHostId() + " found to be unsuitable for explicit dedication as it is " + "running instances of another domain"); - throw new CloudRuntimeException("Host " + hostId + " found to be unsuitable for explicit dedication as it is " + + logger.info(String.format("Host %s found to be unsuitable for explicit dedication as it is running instances of another domain", host)); + throw new CloudRuntimeException("Host " + host.getUuid() + " found to be unsuitable for explicit dedication as it is " + "running instances of another domain"); } } @@ -688,7 +688,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { private boolean checkHostsSuitabilityForExplicitDedication(Long accountId, List domainIds, List hosts) { boolean suitable = true; for (HostVO host : hosts) { - checkHostSuitabilityForExplicitDedication(accountId, domainIds, host.getId()); + checkHostSuitabilityForExplicitDedication(accountId, domainIds, host); } return suitable; } @@ -939,7 +939,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { public void doInTransactionWithoutResult(TransactionStatus status) { Long resourceId = resourceFinal.getId(); if (!_dedicatedDao.remove(resourceId)) { - throw new CloudRuntimeException("Failed to delete Resource " + resourceId); + throw new CloudRuntimeException(String.format("Failed to delete Resource %s", resourceFinal)); } if (zoneId != 
null) { // remove the domainId set in zone diff --git a/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java b/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java index bd1bcf06101..b971b3b8596 100644 --- a/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java +++ b/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java @@ -156,13 +156,16 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements Deploy for (VMInstanceVO vm : allVmsOnHost) { if (vm.getAccountId() != accountId) { - logger.info("Host " + vm.getHostId() + " found to be unsuitable for implicit dedication as it is " + "running instances of another account"); + logger.info(String.format("Host %d for vm %s found to be unsuitable for " + + "implicit dedication as it is running instances of another account", + vm.getHostId(), vm)); suitable = false; break; } else { if (!isImplicitPlannerUsedByOffering(vm.getServiceOfferingId())) { - logger.info("Host " + vm.getHostId() + " found to be unsuitable for implicit dedication as it " + - "is running instances of this account which haven't been created using implicit dedication."); + logger.info(String.format("Host %d for vm %s found to be unsuitable for " + + "implicit dedication as it is running instances of this account which" + + " haven't been created using implicit dedication.", vm.getHostId(), vm)); suitable = false; break; } @@ -177,11 +180,11 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements Deploy return false; for (VMInstanceVO vm : allVmsOnHost) { if (!isImplicitPlannerUsedByOffering(vm.getServiceOfferingId())) { - logger.info("Host " + vm.getHostId() + " found to be running a vm created by a planner other" + " than implicit."); + logger.info(String.format("Host %d found to be running a 
vm %s created by a planner other than implicit.", vm.getHostId(), vm)); createdByImplicitStrict = false; break; } else if (isServiceOfferingUsingPlannerInPreferredMode(vm.getServiceOfferingId())) { - logger.info("Host " + vm.getHostId() + " found to be running a vm created by an implicit planner" + " in preferred mode."); + logger.info(String.format("Host %d found to be running a vm %s created by an implicit planner in preferred mode.", vm.getHostId(), vm)); createdByImplicitStrict = false; break; } diff --git a/plugins/drs/cluster/balanced/src/main/java/org/apache/cloudstack/cluster/Balanced.java b/plugins/drs/cluster/balanced/src/main/java/org/apache/cloudstack/cluster/Balanced.java index c799ac872c0..b6a5ed1aac1 100644 --- a/plugins/drs/cluster/balanced/src/main/java/org/apache/cloudstack/cluster/Balanced.java +++ b/plugins/drs/cluster/balanced/src/main/java/org/apache/cloudstack/cluster/Balanced.java @@ -21,6 +21,7 @@ package org.apache.cloudstack.cluster; import com.cloud.host.Host; import com.cloud.offering.ServiceOffering; +import com.cloud.org.Cluster; import com.cloud.utils.Ternary; import com.cloud.utils.component.AdapterBase; import com.cloud.vm.VirtualMachine; @@ -39,20 +40,21 @@ public class Balanced extends AdapterBase implements ClusterDrsAlgorithm { private static final Logger logger = LogManager.getLogger(Balanced.class); @Override - public boolean needsDrs(long clusterId, List> cpuList, - List> memoryList) throws ConfigurationException { + public boolean needsDrs(Cluster cluster, List> cpuList, + List> memoryList) throws ConfigurationException { + long clusterId = cluster.getId(); double threshold = getThreshold(clusterId); Double imbalance = ClusterDrsAlgorithm.getClusterImbalance(clusterId, cpuList, memoryList, null); String drsMetric = ClusterDrsAlgorithm.getClusterDrsMetric(clusterId); String metricType = ClusterDrsAlgorithm.getDrsMetricType(clusterId); Boolean useRatio = ClusterDrsAlgorithm.getDrsMetricUseRatio(clusterId); if (imbalance > 
threshold) { - logger.debug(String.format("Cluster %d needs DRS. Imbalance: %s Threshold: %s Algorithm: %s DRS metric: %s Metric Type: %s Use ratio: %s", - clusterId, imbalance, threshold, getName(), drsMetric, metricType, useRatio)); + logger.debug("Cluster {} needs DRS. Imbalance: {} Threshold: {} Algorithm: {} DRS metric: {} Metric Type: {} Use ratio: {}", + cluster, imbalance, threshold, getName(), drsMetric, metricType, useRatio); return true; } else { - logger.debug(String.format("Cluster %d does not need DRS. Imbalance: %s Threshold: %s Algorithm: %s DRS metric: %s Metric Type: %s Use ratio: %s", - clusterId, imbalance, threshold, getName(), drsMetric, metricType, useRatio)); + logger.debug("Cluster {} does not need DRS. Imbalance: {} Threshold: {} Algorithm: {} DRS metric: {} Metric Type: {} Use ratio: {}", + cluster, imbalance, threshold, getName(), drsMetric, metricType, useRatio); return false; } } @@ -67,15 +69,15 @@ public class Balanced extends AdapterBase implements ClusterDrsAlgorithm { } @Override - public Ternary getMetrics(long clusterId, VirtualMachine vm, + public Ternary getMetrics(Cluster cluster, VirtualMachine vm, ServiceOffering serviceOffering, Host destHost, Map> hostCpuMap, Map> hostMemoryMap, Boolean requiresStorageMotion) throws ConfigurationException { - Double preImbalance = ClusterDrsAlgorithm.getClusterImbalance(clusterId, new ArrayList<>(hostCpuMap.values()), new ArrayList<>(hostMemoryMap.values()), null); + Double preImbalance = ClusterDrsAlgorithm.getClusterImbalance(cluster.getId(), new ArrayList<>(hostCpuMap.values()), new ArrayList<>(hostMemoryMap.values()), null); Double postImbalance = getImbalancePostMigration(serviceOffering, vm, destHost, hostCpuMap, hostMemoryMap); - logger.debug(String.format("Cluster %d pre-imbalance: %s post-imbalance: %s Algorithm: %s VM: %s srcHost: %d destHost: %s", - clusterId, preImbalance, postImbalance, getName(), vm.getUuid(), vm.getHostId(), destHost.getUuid())); + logger.debug("Cluster {} 
pre-imbalance: {} post-imbalance: {} Algorithm: {} VM: {} srcHost: {} destHost: {}", + cluster, preImbalance, postImbalance, getName(), vm, vm.getHostId(), destHost); // This needs more research to determine the cost and benefit of a migration // TODO: Cost should be a factor of the VM size and the host capacity diff --git a/plugins/drs/cluster/balanced/src/test/java/org/apache/cloudstack/cluster/BalancedTest.java b/plugins/drs/cluster/balanced/src/test/java/org/apache/cloudstack/cluster/BalancedTest.java index a1562b52e38..d5160671958 100644 --- a/plugins/drs/cluster/balanced/src/test/java/org/apache/cloudstack/cluster/BalancedTest.java +++ b/plugins/drs/cluster/balanced/src/test/java/org/apache/cloudstack/cluster/BalancedTest.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.cluster; +import com.cloud.dc.ClusterVO; import com.cloud.host.Host; import com.cloud.service.ServiceOfferingVO; import com.cloud.utils.Ternary; @@ -61,6 +62,8 @@ public class BalancedTest { ServiceOfferingVO serviceOffering; + ClusterVO cluster; + long clusterId = 1L; Map> hostVmMap; @@ -73,6 +76,7 @@ public class BalancedTest { public void setUp() throws NoSuchFieldException, IllegalAccessException { closeable = MockitoAnnotations.openMocks(this); + cluster = Mockito.mock(ClusterVO.class); vm1 = Mockito.mock(VirtualMachine.class); vm2 = Mockito.mock(VirtualMachine.class); @@ -84,10 +88,10 @@ public class BalancedTest { hostVmMap.put(2L, Arrays.asList(vm2, vm3)); serviceOffering = Mockito.mock(ServiceOfferingVO.class); + + Mockito.when(cluster.getId()).thenReturn(clusterId); Mockito.when(vm3.getHostId()).thenReturn(2L); - Mockito.when(destHost.getId()).thenReturn(1L); - Mockito.when(serviceOffering.getCpu()).thenReturn(1); Mockito.when(serviceOffering.getSpeed()).thenReturn(1000); Mockito.when(serviceOffering.getRamSize()).thenReturn(1024); @@ -133,7 +137,7 @@ public class BalancedTest { @Test public void needsDrsWithCpu() throws ConfigurationException, NoSuchFieldException, 
IllegalAccessException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "cpu"); - assertFalse(balanced.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); + assertFalse(balanced.needsDrs(cluster, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); } /* @@ -143,14 +147,14 @@ public class BalancedTest { @Test public void needsDrsWithMemory() throws ConfigurationException, NoSuchFieldException, IllegalAccessException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "memory"); - assertTrue(balanced.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); + assertTrue(balanced.needsDrs(cluster, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); } /* 3. cluster with "unknown" metric */ @Test public void needsDrsWithUnknown() throws NoSuchFieldException, IllegalAccessException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "unknown"); - assertThrows(ConfigurationException.class, () -> balanced.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); + assertThrows(ConfigurationException.class, () -> balanced.needsDrs(cluster, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); } /** @@ -179,7 +183,7 @@ public class BalancedTest { @Test public void getMetricsWithCpu() throws NoSuchFieldException, IllegalAccessException, ConfigurationException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "cpu"); - Ternary result = balanced.getMetrics(clusterId, vm3, serviceOffering, destHost, + Ternary result = balanced.getMetrics(cluster, vm3, serviceOffering, destHost, hostCpuFreeMap, hostMemoryFreeMap, false); assertEquals(0.0, result.first(), 0.01); assertEquals(0.0, result.second(), 0.0); @@ -193,7 +197,7 @@ public class BalancedTest { @Test public void 
getMetricsWithMemory() throws NoSuchFieldException, IllegalAccessException, ConfigurationException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "memory"); - Ternary result = balanced.getMetrics(clusterId, vm3, serviceOffering, destHost, + Ternary result = balanced.getMetrics(cluster, vm3, serviceOffering, destHost, hostCpuFreeMap, hostMemoryFreeMap, false); assertEquals(0.4, result.first(), 0.01); assertEquals(0, result.second(), 0.0); diff --git a/plugins/drs/cluster/condensed/src/main/java/org/apache/cloudstack/cluster/Condensed.java b/plugins/drs/cluster/condensed/src/main/java/org/apache/cloudstack/cluster/Condensed.java index 3a8befa628b..70c5acd951f 100644 --- a/plugins/drs/cluster/condensed/src/main/java/org/apache/cloudstack/cluster/Condensed.java +++ b/plugins/drs/cluster/condensed/src/main/java/org/apache/cloudstack/cluster/Condensed.java @@ -21,6 +21,7 @@ package org.apache.cloudstack.cluster; import com.cloud.host.Host; import com.cloud.offering.ServiceOffering; +import com.cloud.org.Cluster; import com.cloud.utils.Ternary; import com.cloud.utils.component.AdapterBase; import com.cloud.vm.VirtualMachine; @@ -40,8 +41,9 @@ public class Condensed extends AdapterBase implements ClusterDrsAlgorithm { private static final Logger logger = LogManager.getLogger(Condensed.class); @Override - public boolean needsDrs(long clusterId, List> cpuList, - List> memoryList) throws ConfigurationException { + public boolean needsDrs(Cluster cluster, List> cpuList, + List> memoryList) throws ConfigurationException { + long clusterId = cluster.getId(); double threshold = getThreshold(clusterId); Float skipThreshold = ClusterDrsImbalanceSkipThreshold.valueIn(clusterId); Double imbalance = ClusterDrsAlgorithm.getClusterImbalance(clusterId, cpuList, memoryList, skipThreshold); @@ -50,12 +52,12 @@ public class Condensed extends AdapterBase implements ClusterDrsAlgorithm { Boolean useRatio = ClusterDrsAlgorithm.getDrsMetricUseRatio(clusterId); if (imbalance < 
threshold) { - logger.debug(String.format("Cluster %d needs DRS. Imbalance: %s Threshold: %s Algorithm: %s DRS metric: %s Metric Type: %s Use ratio: %s SkipThreshold: %s", - clusterId, imbalance, threshold, getName(), drsMetric, metricType, useRatio, skipThreshold)); + logger.debug("Cluster {} needs DRS. Imbalance: {} Threshold: {} Algorithm: {} DRS metric: {} Metric Type: {} Use ratio: {} SkipThreshold: {}", + cluster, imbalance, threshold, getName(), drsMetric, metricType, useRatio, skipThreshold); return true; } else { - logger.debug(String.format("Cluster %d does not need DRS. Imbalance: %s Threshold: %s Algorithm: %s DRS metric: %s Metric Type: %s Use ratio: %s SkipThreshold: %s", - clusterId, imbalance, threshold, getName(), drsMetric, metricType, useRatio, skipThreshold)); + logger.debug("Cluster {} does not need DRS. Imbalance: {} Threshold: {} Algorithm: {} DRS metric: {} Metric Type: {} Use ratio: {} SkipThreshold: {}", + cluster, imbalance, threshold, getName(), drsMetric, metricType, useRatio, skipThreshold); return false; } } @@ -70,16 +72,16 @@ public class Condensed extends AdapterBase implements ClusterDrsAlgorithm { } @Override - public Ternary getMetrics(long clusterId, VirtualMachine vm, + public Ternary getMetrics(Cluster cluster, VirtualMachine vm, ServiceOffering serviceOffering, Host destHost, Map> hostCpuMap, Map> hostMemoryMap, Boolean requiresStorageMotion) throws ConfigurationException { - Double preImbalance = ClusterDrsAlgorithm.getClusterImbalance(clusterId, new ArrayList<>(hostCpuMap.values()), + Double preImbalance = ClusterDrsAlgorithm.getClusterImbalance(cluster.getId(), new ArrayList<>(hostCpuMap.values()), new ArrayList<>(hostMemoryMap.values()), null); Double postImbalance = getImbalancePostMigration(serviceOffering, vm, destHost, hostCpuMap, hostMemoryMap); - logger.debug(String.format("Cluster %d pre-imbalance: %s post-imbalance: %s Algorithm: %s VM: %s srcHost: %d destHost: %s", - clusterId, preImbalance, postImbalance, 
getName(), vm.getUuid(), vm.getHostId(), destHost.getUuid())); + logger.debug("Cluster {} pre-imbalance: {} post-imbalance: {} Algorithm: {} VM: {} srcHost: {} destHost: {}", + cluster, preImbalance, postImbalance, getName(), vm, vm.getHostId(), destHost); // This needs more research to determine the cost and benefit of a migration // TODO: Cost should be a factor of the VM size and the host capacity diff --git a/plugins/drs/cluster/condensed/src/test/java/org/apache/cloudstack/cluster/CondensedTest.java b/plugins/drs/cluster/condensed/src/test/java/org/apache/cloudstack/cluster/CondensedTest.java index d5072774534..3d3896704da 100644 --- a/plugins/drs/cluster/condensed/src/test/java/org/apache/cloudstack/cluster/CondensedTest.java +++ b/plugins/drs/cluster/condensed/src/test/java/org/apache/cloudstack/cluster/CondensedTest.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.cluster; +import com.cloud.dc.ClusterVO; import com.cloud.host.Host; import com.cloud.service.ServiceOfferingVO; import com.cloud.utils.Ternary; @@ -61,6 +62,8 @@ public class CondensedTest { ServiceOfferingVO serviceOffering; + ClusterVO cluster; + long clusterId = 1L; Map> hostVmMap; @@ -74,6 +77,8 @@ public class CondensedTest { public void setUp() throws NoSuchFieldException, IllegalAccessException { closeable = MockitoAnnotations.openMocks(this); + cluster = Mockito.mock(ClusterVO.class); + vm1 = Mockito.mock(VirtualMachine.class); vm2 = Mockito.mock(VirtualMachine.class); vm3 = Mockito.mock(VirtualMachine.class); // vm to migrate @@ -84,10 +89,10 @@ public class CondensedTest { hostVmMap.put(2L, Arrays.asList(vm2, vm3)); serviceOffering = Mockito.mock(ServiceOfferingVO.class); + + Mockito.when(cluster.getId()).thenReturn(clusterId); Mockito.when(vm3.getHostId()).thenReturn(2L); - Mockito.when(destHost.getId()).thenReturn(1L); - Mockito.when(serviceOffering.getCpu()).thenReturn(1); Mockito.when(serviceOffering.getSpeed()).thenReturn(1000); 
Mockito.when(serviceOffering.getRamSize()).thenReturn(512); @@ -134,7 +139,7 @@ public class CondensedTest { @Test public void needsDrsWithCpu() throws ConfigurationException, NoSuchFieldException, IllegalAccessException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "cpu"); - assertTrue(condensed.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); + assertTrue(condensed.needsDrs(cluster, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); } /* @@ -144,14 +149,14 @@ public class CondensedTest { @Test public void needsDrsWithMemory() throws ConfigurationException, NoSuchFieldException, IllegalAccessException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "memory"); - assertFalse(condensed.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); + assertFalse(condensed.needsDrs(cluster, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); } /* 3. 
cluster with "unknown" metric */ @Test public void needsDrsWithUnknown() throws NoSuchFieldException, IllegalAccessException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "unknown"); - assertThrows(ConfigurationException.class, () -> condensed.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); + assertThrows(ConfigurationException.class, () -> condensed.needsDrs(cluster, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); } /** @@ -180,7 +185,7 @@ public class CondensedTest { @Test public void getMetricsWithCpu() throws NoSuchFieldException, IllegalAccessException, ConfigurationException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "cpu"); - Ternary result = condensed.getMetrics(clusterId, vm3, serviceOffering, destHost, + Ternary result = condensed.getMetrics(cluster, vm3, serviceOffering, destHost, hostCpuFreeMap, hostMemoryFreeMap, false); assertEquals(0.0, result.first(), 0.0); assertEquals(0, result.second(), 0.0); @@ -194,7 +199,7 @@ public class CondensedTest { @Test public void getMetricsWithMemory() throws NoSuchFieldException, IllegalAccessException, ConfigurationException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "memory"); - Ternary result = condensed.getMetrics(clusterId, vm3, serviceOffering, destHost, + Ternary result = condensed.getMetrics(cluster, vm3, serviceOffering, destHost, hostCpuFreeMap, hostMemoryFreeMap, false); assertEquals(-0.4, result.first(), 0.01); assertEquals(0, result.second(), 0.0); diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookServiceImpl.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookServiceImpl.java index 58b265a99c0..97d00c45e4d 100644 --- a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookServiceImpl.java +++ 
b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookServiceImpl.java @@ -103,8 +103,7 @@ public class WebhookServiceImpl extends ManagerBase implements WebhookService, W return jobs; } if (event.getResourceAccountId() == null) { - logger.warn("Skipping delivering event [ID: {}, description: {}] to any webhook as account ID is missing", - event.getEventId(), event.getDescription()); + logger.warn("Skipping delivering event {} to any webhook as account ID is missing", event); throw new EventBusException(String.format("Account missing for the event ID: %s", event.getEventUuid())); } List domainIds = new ArrayList<>(); @@ -327,7 +326,7 @@ public class WebhookServiceImpl extends ManagerBase implements WebhookService, W } long deliveriesLimit = WebhookDeliveriesLimit.value(); logger.debug("Clearing old deliveries for webhooks with limit={} using management server {}", - deliveriesLimit, msHost.getMsid()); + deliveriesLimit, msHost); long processed = cleanupOldWebhookDeliveries(deliveriesLimit); logger.debug("Cleared old deliveries with limit={} for {} webhooks", deliveriesLimit, processed); } catch (Exception e) { diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryJoinVO.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryJoinVO.java index e36f870c8d9..f0fb3e1cc9b 100644 --- a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryJoinVO.java +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryJoinVO.java @@ -173,8 +173,8 @@ public class WebhookDeliveryJoinVO extends BaseViewVO implements InternalIdentit @Override public String toString() { - return String.format("WebhookDelivery [%s]", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "uuid", "webhookId", "startTime", "success")); + return String.format("WebhookDelivery %s", 
ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "webhookId", "webhookName", "startTime", "success")); } public WebhookDeliveryJoinVO() { diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryVO.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryVO.java index e39f57a2663..e266ea5d7c4 100644 --- a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryVO.java +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryVO.java @@ -130,7 +130,7 @@ public class WebhookDeliveryVO implements WebhookDelivery { @Override public String toString() { - return String.format("WebhookDelivery [%s]", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + return String.format("WebhookDelivery %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( this, "id", "uuid", "webhookId", "startTime", "success")); } diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookJoinVO.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookJoinVO.java index f1708609587..9ff15d34a9c 100644 --- a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookJoinVO.java +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookJoinVO.java @@ -225,7 +225,7 @@ public class WebhookJoinVO implements ControlledViewEntity { @Override public String toString() { - return String.format("Webhook [%s]", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + return String.format("Webhook %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( this, "id", "uuid", "name")); } diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookVO.java 
b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookVO.java index 93e3e801423..852cdf740d1 100644 --- a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookVO.java +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookVO.java @@ -191,7 +191,7 @@ public class WebhookVO implements Webhook { @Override public String toString() { - return String.format("Webhook [%s]",ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + return String.format("Webhook %s",ReflectionToStringBuilderUtils.reflectOnlySelectedFields( this, "id", "uuid", "name", "payloadUrl")); } diff --git a/plugins/host-allocators/random/src/main/java/com/cloud/agent/manager/allocator/impl/RandomAllocator.java b/plugins/host-allocators/random/src/main/java/com/cloud/agent/manager/allocator/impl/RandomAllocator.java index a71ae26e670..53e44ab5aab 100644 --- a/plugins/host-allocators/random/src/main/java/com/cloud/agent/manager/allocator/impl/RandomAllocator.java +++ b/plugins/host-allocators/random/src/main/java/com/cloud/agent/manager/allocator/impl/RandomAllocator.java @@ -75,7 +75,7 @@ public class RandomAllocator extends AdapterBase implements HostAllocator { if (logger.isDebugEnabled()) { logger.debug(String.format("Found %d hosts %s with type: %s, zone ID: %d, pod ID: %d, cluster ID: %s, offering host tag(s): %s, template tag: %s", taggedHosts.size(), - (taggedHosts.isEmpty() ? "" : String.format("(%s)", StringUtils.join(taggedHosts.stream().map(HostVO::getId).toArray(), ","))), + (taggedHosts.isEmpty() ? 
"" : String.format("(%s)", StringUtils.join(taggedHosts.stream().map(HostVO::toString).toArray(), ","))), type.name(), dcId, podId, clusterId, offeringHostTag, templateTag)); } return taggedHosts; @@ -139,19 +139,19 @@ public class RandomAllocator extends AdapterBase implements HostAllocator { } if (avoid.shouldAvoid(host)) { if (logger.isDebugEnabled()) { - logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + " is in avoid set, skipping this and trying other available hosts"); + logger.debug(String.format("Host %s is in avoid set, skipping this and trying other available hosts", host)); } continue; } Pair cpuCapabilityAndCapacity = capacityManager.checkIfHostHasCpuCapabilityAndCapacity(host, offering, considerReservedCapacity); if (!cpuCapabilityAndCapacity.first() || !cpuCapabilityAndCapacity.second()) { if (logger.isDebugEnabled()) { - logger.debug("Not using host " + host.getId() + "; host has cpu capability? " + cpuCapabilityAndCapacity.first() + ", host has capacity?" + cpuCapabilityAndCapacity.second()); + logger.debug(String.format("Not using host %s; host has cpu capability? %s, host has capacity? 
%s", host, cpuCapabilityAndCapacity.first(), cpuCapabilityAndCapacity.second())); } continue; } if (logger.isDebugEnabled()) { - logger.debug("Found a suitable host, adding to list: " + host.getId()); + logger.debug(String.format("Found a suitable host, adding to list: %s", host)); } suitableHosts.add(host); } diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java index 321369b24b9..f918f66941e 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java @@ -258,7 +258,7 @@ public class BareMetalDiscoverer extends DiscovererBase implements Discoverer, R List deadVms = _vmDao.listByLastHostId(host.getId()); for (VMInstanceVO vm : deadVms) { if (vm.getState() == State.Running || vm.getHostId() != null) { - throw new CloudRuntimeException("VM " + vm.getId() + "is still running on host " + host.getId()); + throw new CloudRuntimeException(String.format("VM %s is still running on host %s", vm, host)); } _vmDao.remove(vm.getId()); } diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java index 318ac225c8c..83199b5f51c 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java @@ -76,12 +76,12 @@ public class BareMetalPlanner extends AdapterBase implements DeploymentPlanner { String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag); if (vm.getLastHostId() != null && haVmTag == null) { - HostVO h = _hostDao.findById(vm.getLastHostId()); - DataCenter dc = 
_dcDao.findById(h.getDataCenterId()); - Pod pod = _podDao.findById(h.getPodId()); - Cluster c = _clusterDao.findById(h.getClusterId()); - logger.debug("Start baremetal vm " + vm.getId() + " on last stayed host " + h.getId()); - return new DeployDestination(dc, pod, c, h); + HostVO host = _hostDao.findById(vm.getLastHostId()); + DataCenter dc = _dcDao.findById(host.getDataCenterId()); + Pod pod = _podDao.findById(host.getPodId()); + Cluster cluster = _clusterDao.findById(host.getClusterId()); + logger.debug("Start baremetal vm {} on last stayed host {}", vm, host); + return new DeployDestination(dc, pod, cluster, host); } if (haVmTag != null) { @@ -124,22 +124,22 @@ public class BareMetalPlanner extends AdapterBase implements DeploymentPlanner { if (haVmTag == null) { hosts = _resourceMgr.listAllUpAndEnabledNonHAHosts(Host.Type.Routing, cluster.getId(), cluster.getPodId(), cluster.getDataCenterId()); } else { - logger.warn("Cannot find HA host with tag " + haVmTag + " in cluster id=" + cluster.getId() + ", pod id=" + cluster.getPodId() + ", data center id=" + + logger.warn("Cannot find HA host with tag " + haVmTag + " in cluster " + cluster + ", pod id=" + cluster.getPodId() + ", data center id=" + cluster.getDataCenterId()); return null; } - for (HostVO h : hosts) { - long cluster_id = h.getClusterId(); + for (HostVO host : hosts) { + long cluster_id = host.getClusterId(); ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio"); ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio"); Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); - if (_capacityMgr.checkIfHostHasCapacity(h.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { - logger.debug("Find host " + h.getId() + " has enough capacity"); - DataCenter dc = 
_dcDao.findById(h.getDataCenterId()); - Pod pod = _podDao.findById(h.getPodId()); - return new DeployDestination(dc, pod, cluster, h); + if (_capacityMgr.checkIfHostHasCapacity(host, cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { + logger.debug(String.format("Find host %s has enough capacity", host)); + DataCenter dc = _dcDao.findById(host.getDataCenterId()); + Pod pod = _podDao.findById(host.getPodId()); + return new DeployDestination(dc, pod, cluster, host); } } } diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java index bf991b77e1c..d90ea6c3731 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java @@ -98,10 +98,10 @@ public class BaremetalManagerImpl extends ManagerBase implements BaremetalManage if (newState == State.Starting) { host.setDetail("vmName", vo.getInstanceName()); - logger.debug("Add vmName " + host.getDetail("vmName") + " to host " + host.getId() + " details"); + logger.debug(String.format("Add vmName %s to host %s details", host.getDetail("vmName"), host)); } else { if (host.getDetail("vmName") != null && host.getDetail("vmName").equalsIgnoreCase(vo.getInstanceName())) { - logger.debug("Remove vmName " + host.getDetail("vmName") + " from host " + host.getId() + " details"); + logger.debug(String.format("Remove vmName %s from host %s details", host.getDetail("vmName"), host)); host.getDetails().remove("vmName"); } } @@ -142,13 +142,13 @@ public class BaremetalManagerImpl extends ManagerBase implements BaremetalManage } if (State.Starting != vm.getState()) { - throw new CloudRuntimeException(String.format("baremetal instance[name:%s, state:%s] is not in state of Starting", vmName, 
vm.getState())); + throw new CloudRuntimeException(String.format("baremetal instance %s [state:%s] is not in state of Starting", vm, vm.getState())); } vm.setState(State.Running); vm.setLastHostId(vm.getHostId()); vmDao.update(vm.getId(), vm); - logger.debug(String.format("received baremetal provision done notification for vm[id:%s name:%s] running on host[mac:%s, ip:%s]", - vm.getId(), vm.getInstanceName(), host.getPrivateMacAddress(), host.getPrivateIpAddress())); + logger.debug(String.format("received baremetal provision done notification for vm %s running on host %s [mac:%s, ip:%s]", + vm, host, host.getPrivateMacAddress(), host.getPrivateIpAddress())); } } diff --git a/plugins/hypervisors/hyperv/src/main/java/com/cloud/ha/HypervInvestigator.java b/plugins/hypervisors/hyperv/src/main/java/com/cloud/ha/HypervInvestigator.java index d820fd5b6d3..3d79b9efdd1 100644 --- a/plugins/hypervisors/hyperv/src/main/java/com/cloud/ha/HypervInvestigator.java +++ b/plugins/hypervisors/hyperv/src/main/java/com/cloud/ha/HypervInvestigator.java @@ -66,7 +66,7 @@ public class HypervInvestigator extends AdapterBase implements Investigator { return answer.getResult() ? 
Status.Down : Status.Up; } } catch (Exception e) { - logger.debug("Failed to send command to host: " + neighbor.getId(), e); + logger.debug(String.format("Failed to send command to host: %s", neighbor), e); } } diff --git a/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java b/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java index 283f4dc0c96..a5947238bf6 100644 --- a/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java +++ b/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java @@ -136,7 +136,7 @@ public class HypervServerDiscoverer extends DiscovererBase implements Discoverer } if (logger.isDebugEnabled()) { - logger.debug("Setting up host " + agentId); + logger.debug(String.format("Setting up host %s", agent)); } HostEnvironment env = new HostEnvironment(); @@ -161,14 +161,14 @@ public class HypervServerDiscoverer extends DiscovererBase implements Discoverer if (reason == null) { reason = " details were null"; } - logger.warn("Unable to setup agent " + agentId + " due to " + reason); + logger.warn(String.format("Unable to setup agent %s due to %s", agent, reason)); } // Error handling borrowed from XcpServerDiscoverer, may need to be // updated. 
} catch (AgentUnavailableException e) { - logger.warn("Unable to setup agent " + agentId + " because it became unavailable.", e); + logger.warn(String.format("Unable to setup agent %s because it became unavailable.", agent), e); } catch (OperationTimedoutException e) { - logger.warn("Unable to setup agent " + agentId + " because it timed out", e); + logger.warn(String.format("Unable to setup agent %s because it timed out", agent), e); } throw new ConnectionException(true, "Reinitialize agent after setup."); } @@ -256,7 +256,7 @@ public class HypervServerDiscoverer extends DiscovererBase implements Discoverer } logger.info("Creating" + HypervDirectConnectResource.class.getName() + " HypervDirectConnectResource for zone/pod/cluster " + dcId + "/" + podId + "/" + - clusterId); + cluster); // Some Hypervisors organise themselves in pools. // The startup command tells us what pool they are using. @@ -391,7 +391,7 @@ public class HypervServerDiscoverer extends DiscovererBase implements Discoverer return null; } - logger.info("Host: " + host.getName() + " connected with hypervisor type: " + HypervisorType.Hyperv + ". Checking CIDR..."); + logger.info(String.format("Host: %s connected with hypervisor type: %s. 
Checking CIDR...", host, HypervisorType.Hyperv)); HostPodVO pod = _podDao.findById(host.getPodId()); DataCenterVO dc = _dcDao.findById(host.getDataCenterId()); diff --git a/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/guru/HypervGuru.java b/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/guru/HypervGuru.java index c00ee70bf13..d488ee2058f 100644 --- a/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/guru/HypervGuru.java +++ b/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/guru/HypervGuru.java @@ -123,7 +123,7 @@ public class HypervGuru extends HypervisorGuruBase implements HypervisorGuru { String mac = networkModel.getNextAvailableMacAddressInNetwork(networkId); nicTo.setMac(mac); } catch (InsufficientAddressCapacityException e) { - throw new CloudRuntimeException("unable to allocate mac address on network: " + networkId); + throw new CloudRuntimeException(String.format("unable to allocate mac address on network: %s", network.getUuid())); } nicTo.setDns1(profile.getIPv4Dns1()); nicTo.setDns2(profile.getIPv4Dns2()); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/ha/KVMInvestigator.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/ha/KVMInvestigator.java index 8fc74826242..eb64f4bc439 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/ha/KVMInvestigator.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/ha/KVMInvestigator.java @@ -60,7 +60,7 @@ public class KVMInvestigator extends AdapterBase implements Investigator { return haManager.isVMAliveOnHost(host); } Status status = isAgentAlive(host); - logger.debug("HA: HOST is ineligible legacy state " + status + " for host " + host.getId()); + logger.debug("HA: HOST is ineligible legacy state {} for host {}", status, host); if (status == null) { throw new UnknownVM(); } @@ -88,8 +88,7 @@ public class KVMInvestigator extends AdapterBase implements Investigator { storageSupportHA = 
storageSupportHa(zonePools); } if (!storageSupportHA) { - logger.warn( - "Agent investigation was requested on host " + agent + ", but host does not support investigation because it has no NFS storage. Skipping investigation."); + logger.warn("Agent investigation was requested on host {}, but host does not support investigation because it has no NFS storage. Skipping investigation.", agent); return Status.Disconnected; } @@ -104,7 +103,7 @@ public class KVMInvestigator extends AdapterBase implements Investigator { hostStatus = answer.getResult() ? Status.Down : Status.Up; } } catch (Exception e) { - logger.debug("Failed to send command to host: " + agent.getId()); + logger.debug("Failed to send command to host: {}", agent); } if (hostStatus == null) { hostStatus = Status.Disconnected; @@ -116,18 +115,18 @@ public class KVMInvestigator extends AdapterBase implements Investigator { || (neighbor.getHypervisorType() != Hypervisor.HypervisorType.KVM && neighbor.getHypervisorType() != Hypervisor.HypervisorType.LXC)) { continue; } - logger.debug("Investigating host:" + agent.getId() + " via neighbouring host:" + neighbor.getId()); + logger.debug("Investigating host:{} via neighbouring host:{}", agent, neighbor); try { Answer answer = _agentMgr.easySend(neighbor.getId(), cmd); if (answer != null) { neighbourStatus = answer.getResult() ? 
Status.Down : Status.Up; - logger.debug("Neighbouring host:" + neighbor.getId() + " returned status:" + neighbourStatus + " for the investigated host:" + agent.getId()); + logger.debug("Neighbouring host:{} returned status:{} for the investigated host:{}", neighbor, neighbourStatus, agent); if (neighbourStatus == Status.Up) { break; } } } catch (Exception e) { - logger.debug("Failed to send command to host: " + neighbor.getId()); + logger.debug("Failed to send command to host: {}", neighbor); } } if (neighbourStatus == Status.Up && (hostStatus == Status.Disconnected || hostStatus == Status.Down)) { @@ -136,7 +135,7 @@ public class KVMInvestigator extends AdapterBase implements Investigator { if (neighbourStatus == Status.Down && (hostStatus == Status.Disconnected || hostStatus == Status.Down)) { hostStatus = Status.Down; } - logger.debug("HA: HOST is ineligible legacy state " + hostStatus + " for host " + agent.getId()); + logger.debug("HA: HOST is ineligible legacy state {} for host {}", hostStatus, agent); return hostStatus; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetStorageStatsCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetStorageStatsCommandWrapper.java index d00f5b540e2..419b5449258 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetStorageStatsCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetStorageStatsCommandWrapper.java @@ -40,7 +40,8 @@ public final class LibvirtGetStorageStatsCommandWrapper extends CommandWrapper commands = new ArrayList<>(); + commands.add(new String[]{ + Script.getExecutableAbsolutePath("bash"), + "-c", + String.format( + "%s %s | %s 'NR==2 {print $1}'", + Script.getExecutableAbsolutePath("df"), + pool.getLocalPath(), + Script.getExecutableAbsolutePath("awk") + ) + }); + String result = 
Script.executePipedCommands(commands, 1000).second(); + if (StringUtils.isBlank(result)) { + return; + } + result = result.trim(); + commands.add(new String[]{ + Script.getExecutableAbsolutePath("bash"), + "-c", + String.format( + "%s -z %s 1 2 | %s 'NR==7 {print $2}'", + Script.getExecutableAbsolutePath("iostat"), + result, + Script.getExecutableAbsolutePath("awk") + ) + }); + result = Script.executePipedCommands(commands, 10000).second(); + logger.trace("Pool used IOPS result: {}", result); + if (StringUtils.isBlank(result)) { + return; + } + try { + double doubleValue = Double.parseDouble(result); + pool.setUsedIops((long) doubleValue); + logger.debug("Updated used IOPS: {} for pool: {}", pool.getUsedIops(), pool.getName()); + } catch (NumberFormatException e) { + logger.warn(String.format("Unable to parse retrieved used IOPS: %s for pool: %s", result, + pool.getName())); + } + } + @Override public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) { logger.info("Trying to fetch storage pool " + uuid + " from libvirt"); @@ -591,6 +639,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { } pool.setCapacity(storage.getInfo().capacity); pool.setUsed(storage.getInfo().allocation); + updateLocalPoolIops(pool); pool.setAvailable(storage.getInfo().available); logger.debug("Successfully refreshed pool " + uuid + diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java index 52adc59cbe7..8e5af7c613d 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java @@ -20,14 +20,13 @@ import java.io.File; import java.util.List; import java.util.Map; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import 
org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.joda.time.Duration; import org.libvirt.StoragePool; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; -import org.apache.commons.lang3.builder.ToStringBuilder; -import org.apache.commons.lang3.builder.ToStringStyle; import com.cloud.agent.api.to.HostTO; import com.cloud.agent.properties.AgentProperties; @@ -44,6 +43,8 @@ public class LibvirtStoragePool implements KVMStoragePool { protected String uuid; protected long capacity; protected long used; + protected Long capacityIops; + protected Long usedIops; protected long available; protected String name; protected String localPath; @@ -82,20 +83,38 @@ public class LibvirtStoragePool implements KVMStoragePool { this.used = used; } - public void setAvailable(long available) { - this.available = available; - } - @Override public long getUsed() { return this.used; } + @Override + public Long getCapacityIops() { + return capacityIops; + } + + public void setCapacityIops(Long capacityIops) { + this.capacityIops = capacityIops; + } + + @Override + public Long getUsedIops() { + return usedIops; + } + + public void setUsedIops(Long usedIops) { + this.usedIops = usedIops; + } + @Override public long getAvailable() { return this.available; } + public void setAvailable(long available) { + this.available = available; + } + public StoragePoolType getStoragePoolType() { return this.type; } @@ -328,7 +347,7 @@ public class LibvirtStoragePool implements KVMStoragePool { @Override public String toString() { - return new ToStringBuilder(this, ToStringStyle.JSON_STYLE).append("uuid", getUuid()).append("path", getLocalPath()).toString(); + return String.format("LibvirtStoragePool %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "uuid", "path")); } @Override diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java index 19d8378eb78..0cf8ce0018d 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java @@ -38,6 +38,7 @@ import com.cloud.utils.PropertiesUtil; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -604,7 +605,9 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { } public String toString() { - return String.format("type=%s; address=%s; connid=%s", getType(), getAddress(), getConnectionId()); + return String.format("AddressInfo %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "type", "address", "connectionId")); } } diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java index 81daabf59d7..b937be5265b 100644 --- a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java @@ -73,12 +73,12 @@ public final class KVMHAProvider extends HAAbstractHostProvider implements HAPro final OutOfBandManagementResponse resp = outOfBandManagementService.executePowerOperation(r, PowerOperation.RESET, null); return resp.getSuccess(); } else { - logger.warn("OOBM recover operation failed for the host " + r.getName()); + logger.warn("OOBM recover operation failed for the host {}", r); return false; } } catch 
(Exception e){ - logger.warn("OOBM service is not configured or enabled for this host " + r.getName() + " error is " + e.getMessage()); - throw new HARecoveryException(" OOBM service is not configured or enabled for this host " + r.getName(), e); + logger.warn("OOBM service is not configured or enabled for this host {} error is {}", r, e.getMessage()); + throw new HARecoveryException(String.format(" OOBM service is not configured or enabled for this host %s", r), e); } } @@ -90,12 +90,12 @@ public final class KVMHAProvider extends HAAbstractHostProvider implements HAPro final OutOfBandManagementResponse resp = outOfBandManagementService.executePowerOperation(r, PowerOperation.OFF, null); return resp.getSuccess(); } else { - logger.warn("OOBM fence operation failed for this host " + r.getName()); + logger.warn("OOBM fence operation failed for this host {}", r); return false; } } catch (Exception e){ - logger.warn("OOBM service is not configured or enabled for this host " + r.getName() + " error is " + e.getMessage()); - throw new HAFenceException("OBM service is not configured or enabled for this host " + r.getName() , e); + logger.warn("OOBM service is not configured or enabled for this host {} error is {}", r, e.getMessage()); + throw new HAFenceException(String.format("OBM service is not configured or enabled for this host %s", r.getName()), e); } } diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java index 10d684bbdd3..31f87d7e044 100644 --- a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java @@ -21,6 +21,7 @@ import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CheckOnHostCommand; import 
com.cloud.agent.api.CheckVMActivityOnStoragePoolCommand; +import com.cloud.dc.dao.ClusterDao; import com.cloud.exception.StorageUnavailableException; import com.cloud.ha.HighAvailabilityManager; import com.cloud.host.Host; @@ -51,6 +52,8 @@ import java.util.List; public class KVMHostActivityChecker extends AdapterBase implements ActivityCheckerInterface, HealthCheckerInterface { + @Inject + private ClusterDao clusterDao; @Inject private VolumeDao volumeDao; @Inject @@ -67,7 +70,7 @@ public class KVMHostActivityChecker extends AdapterBase implements ActivityCheck @Override public boolean isActive(Host r, DateTime suspectTime) throws HACheckerException { try { - return isVMActivtyOnHost(r, suspectTime); + return isVMActivityOnHost(r, suspectTime); } catch (HACheckerException e) { //Re-throwing the exception to avoid poluting the 'HACheckerException' already thrown throw e; @@ -146,7 +149,7 @@ public class KVMHostActivityChecker extends AdapterBase implements ActivityCheck return hostStatus == Status.Up; } - private boolean isVMActivtyOnHost(Host agent, DateTime suspectTime) throws HACheckerException { + private boolean isVMActivityOnHost(Host agent, DateTime suspectTime) throws HACheckerException { if (agent.getHypervisorType() != Hypervisor.HypervisorType.KVM && agent.getHypervisorType() != Hypervisor.HypervisorType.LXC) { throw new IllegalStateException(String.format("Calling KVM investigator for non KVM Host of type [%s].", agent.getHypervisorType())); } @@ -155,7 +158,7 @@ public class KVMHostActivityChecker extends AdapterBase implements ActivityCheck for (StoragePool pool : poolVolMap.keySet()) { activityStatus = verifyActivityOfStorageOnHost(poolVolMap, pool, agent, suspectTime, activityStatus); if (!activityStatus) { - logger.warn(String.format("It seems that the storage pool [%s] does not have activity on %s.", pool.getId(), agent.toString())); + logger.warn("It seems that the storage pool [{}] does not have activity on {}.", pool, agent); break; } } @@ 
-167,20 +170,20 @@ public class KVMHostActivityChecker extends AdapterBase implements ActivityCheck List volume_list = poolVolMap.get(pool); final CheckVMActivityOnStoragePoolCommand cmd = new CheckVMActivityOnStoragePoolCommand(agent, pool, volume_list, suspectTime); - logger.debug(String.format("Checking VM activity for %s on storage pool [%s].", agent.toString(), pool.getId())); + logger.debug("Checking VM activity for {} on storage pool [{}].", agent.toString(), pool); try { Answer answer = storageManager.sendToPool(pool, getNeighbors(agent), cmd); if (answer != null) { activityStatus = !answer.getResult(); - logger.debug(String.format("%s %s activity on storage pool [%s]", agent.toString(), activityStatus ? "has" : "does not have", pool.getId())); + logger.debug("{} {} activity on storage pool [{}]", agent.toString(), activityStatus ? "has" : "does not have", pool); } else { - String message = String.format("Did not get a valid response for VM activity check for %s on storage pool [%s].", agent.toString(), pool.getId()); + String message = String.format("Did not get a valid response for VM activity check for %s on storage pool [%s].", agent.toString(), pool); logger.debug(message); throw new IllegalStateException(message); } } catch (StorageUnavailableException e){ - String message = String.format("Storage [%s] is unavailable to do the check, probably the %s is not reachable.", pool.getId(), agent.toString()); + String message = String.format("Storage [%s] is unavailable to do the check, probably the %s is not reachable.", pool, agent); logger.warn(message, e); throw new HACheckerException(message, e); } @@ -191,15 +194,15 @@ public class KVMHostActivityChecker extends AdapterBase implements ActivityCheck List vm_list = vmInstanceDao.listByHostId(agent.getId()); List volume_list = new ArrayList(); for (VirtualMachine vm : vm_list) { - logger.debug(String.format("Retrieving volumes of VM [%s]...", vm.getId())); + logger.debug("Retrieving volumes of VM [{}]...", 
vm); List vm_volume_list = volumeDao.findByInstance(vm.getId()); volume_list.addAll(vm_volume_list); } HashMap> poolVolMap = new HashMap>(); for (Volume vol : volume_list) { - logger.debug(String.format("Retrieving storage pool [%s] of volume [%s]...", vol.getPoolId(), vol.getId())); StoragePool sp = storagePool.findById(vol.getPoolId()); + logger.debug("Retrieving storage pool [{}] of volume [{}]...", sp, vol); if (!poolVolMap.containsKey(sp)) { List list = new ArrayList(); list.add(vol); @@ -215,7 +218,7 @@ public class KVMHostActivityChecker extends AdapterBase implements ActivityCheck public long[] getNeighbors(Host agent) { List neighbors = new ArrayList(); List cluster_hosts = resourceManager.listHostsInClusterByStatus(agent.getClusterId(), Status.Up); - logger.debug(String.format("Retrieving all \"Up\" hosts from cluster [%s]...", agent.getClusterId())); + logger.debug("Retrieving all \"Up\" hosts from cluster [{}]...", clusterDao.findById(agent.getClusterId())); for (HostVO host : cluster_hosts) { if (host.getId() == agent.getId() || (host.getHypervisorType() != Hypervisor.HypervisorType.KVM && host.getHypervisorType() != Hypervisor.HypervisorType.LXC)) { continue; diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptorTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptorTest.java index c2bbff7efb0..88346abd017 100644 --- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptorTest.java +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptorTest.java @@ -17,18 +17,22 @@ package com.cloud.hypervisor.kvm.storage; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.never; + import java.util.HashMap; import java.util.Map; import 
java.util.UUID; -import com.cloud.utils.exception.CloudRuntimeException; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.libvirt.Connect; import org.libvirt.StoragePool; -import org.libvirt.StoragePoolInfo; +import org.mockito.Mock; import org.mockito.MockedStatic; import org.mockito.Mockito; import org.mockito.MockitoAnnotations; @@ -38,6 +42,9 @@ import org.mockito.junit.MockitoJUnitRunner; import com.cloud.hypervisor.kvm.resource.LibvirtConnection; import com.cloud.hypervisor.kvm.resource.LibvirtStoragePoolDef; import com.cloud.storage.Storage; +import com.cloud.utils.Pair; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.Script; @RunWith(MockitoJUnitRunner.class) public class LibvirtStorageAdaptorTest { @@ -46,6 +53,11 @@ public class LibvirtStorageAdaptorTest { private AutoCloseable closeable; + @Mock + LibvirtStoragePool mockPool; + + MockedStatic