diff --git a/.asf.yaml b/.asf.yaml index 4d979a18833..c052077c753 100644 --- a/.asf.yaml +++ b/.asf.yaml @@ -60,6 +60,7 @@ github: - bernardodemarco - abh1sar - FelipeM525 + - lucas-a-martins protected_branches: ~ diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index 8044cc041df..10f05a1adfd 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -26,6 +26,13 @@ body: attributes: label: problem value: The long description of your problem +- type: markdown + attributes: + value: "## What versions of cloudstack and any infra components are you using" +- type: textarea + attributes: + label: versions + value: The versions of ACS, hypervisors, storage, network etc.. - type: textarea attributes: label: The steps to reproduce the bug diff --git a/.python-version b/.python-version index d70c8f8d89f..c8cfe395918 100644 --- a/.python-version +++ b/.python-version @@ -1 +1 @@ -3.6 +3.10 diff --git a/README.md b/README.md index cc71c848d5d..f66a4dc6f97 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Apache CloudStack [![Build Status](https://github.com/apache/cloudstack/actions/workflows/build.yml/badge.svg?branch=main)](https://github.com/apache/cloudstack/actions/workflows/build.yml) [![UI Build](https://github.com/apache/cloudstack/actions/workflows/ui.yml/badge.svg)](https://github.com/apache/cloudstack/actions/workflows/ui.yml) [![License Check](https://github.com/apache/cloudstack/actions/workflows/rat.yml/badge.svg?branch=main)](https://github.com/apache/cloudstack/actions/workflows/rat.yml) [![Simulator CI](https://github.com/apache/cloudstack/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/apache/cloudstack/actions/workflows/ci.yml) [![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=apache_cloudstack&metric=alert_status)](https://sonarcloud.io/dashboard?id=apache_cloudstack) 
[![codecov](https://codecov.io/gh/apache/cloudstack/branch/main/graph/badge.svg)](https://codecov.io/gh/apache/cloudstack) -[![Apache CloudStack](tools/logo/acsxmas.jpg)](https://cloudstack.apache.org/) +[![Apache CloudStack](tools/logo/apache_cloudstack.png)](https://cloudstack.apache.org/) Apache CloudStack is open source software designed to deploy and manage large networks of virtual machines, as a highly available, highly scalable diff --git a/agent/conf/agent.properties b/agent/conf/agent.properties index 3b6a7b7de29..bff7078fd9f 100644 --- a/agent/conf/agent.properties +++ b/agent/conf/agent.properties @@ -209,7 +209,7 @@ hypervisor.type=kvm # the management server would send. # In case of arm64 (aarch64), this will change the machine type to 'virt' and # adds a SCSI and a USB controller in the domain xml. -# Possible values: x86_64 | aarch64 +# Possible values: x86_64 | aarch64 | s390x # If null (default), defaults to the VM's OS architecture #guest.cpu.arch= @@ -286,6 +286,7 @@ hypervisor.type=kvm # The model of Watchdog timer to present to the Guest. # For all models refer to the libvirt documentation. +# PLEASE NOTE: to disable the watchdogs definitions, use value: none #vm.watchdog.model=i6300esb # Action to take when the Guest/Instance is no longer notifying the Watchdog timer. @@ -433,3 +434,10 @@ iscsi.session.cleanup.enabled=false # Implicit host tags managed by agent.properties # host.tags= + +# Timeout(in seconds) for SSL handshake when agent connects to server. When no value is set then default value of 30s +# will be used +#ssl.handshake.timeout= + +# Wait(in seconds) during agent reconnections. 
When no value is set then default value of 5s will be used +#backoff.seconds= diff --git a/agent/src/main/java/com/cloud/agent/Agent.java b/agent/src/main/java/com/cloud/agent/Agent.java index c84179d6660..0a76bfbb4f8 100644 --- a/agent/src/main/java/com/cloud/agent/Agent.java +++ b/agent/src/main/java/com/cloud/agent/Agent.java @@ -27,23 +27,26 @@ import java.net.UnknownHostException; import java.nio.channels.ClosedChannelException; import java.nio.charset.Charset; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Timer; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import javax.naming.ConfigurationException; -import com.cloud.resource.AgentStatusUpdater; -import com.cloud.resource.ResourceStatusUpdater; -import com.cloud.agent.api.PingAnswer; -import com.cloud.utils.NumbersUtil; import org.apache.cloudstack.agent.lb.SetupMSListAnswer; import org.apache.cloudstack.agent.lb.SetupMSListCommand; import org.apache.cloudstack.ca.PostCertificateRenewalCommand; @@ -55,10 +58,10 @@ import org.apache.cloudstack.managed.context.ManagedContextTimerTask; import org.apache.cloudstack.utils.security.KeyStoreUtils; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.io.FileUtils; -import org.apache.commons.lang.ObjectUtils; -import org.apache.commons.lang3.StringUtils; -import 
org.apache.logging.log4j.Logger; +import org.apache.commons.lang3.ObjectUtils; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.ThreadContext; import com.cloud.agent.api.AgentControlAnswer; import com.cloud.agent.api.AgentControlCommand; @@ -67,6 +70,9 @@ import com.cloud.agent.api.Command; import com.cloud.agent.api.CronCommand; import com.cloud.agent.api.MaintainAnswer; import com.cloud.agent.api.MaintainCommand; +import com.cloud.agent.api.MigrateAgentConnectionAnswer; +import com.cloud.agent.api.MigrateAgentConnectionCommand; +import com.cloud.agent.api.PingAnswer; import com.cloud.agent.api.PingCommand; import com.cloud.agent.api.ReadyCommand; import com.cloud.agent.api.ShutdownCommand; @@ -76,9 +82,12 @@ import com.cloud.agent.transport.Request; import com.cloud.agent.transport.Response; import com.cloud.exception.AgentControlChannelException; import com.cloud.host.Host; +import com.cloud.resource.AgentStatusUpdater; +import com.cloud.resource.ResourceStatusUpdater; import com.cloud.resource.ServerResource; +import com.cloud.utils.NumbersUtil; import com.cloud.utils.PropertiesUtil; -import com.cloud.utils.backoff.BackoffAlgorithm; +import com.cloud.utils.StringUtils; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.NioConnectionException; @@ -90,7 +99,6 @@ import com.cloud.utils.nio.NioConnection; import com.cloud.utils.nio.Task; import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; -import org.apache.logging.log4j.ThreadContext; /** * @config @@ -114,7 +122,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater Configuration(66), // Exiting due to configuration problems. Error(67); // Exiting because of error. 
- int value; + final int value; ExitStatus(final int value) { this.value = value; @@ -125,133 +133,162 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } } - List _controlListeners = new ArrayList(); + CopyOnWriteArrayList controlListeners = new CopyOnWriteArrayList<>(); - IAgentShell _shell; - NioConnection _connection; - ServerResource _resource; - Link _link; - Long _id; + IAgentShell shell; + NioConnection connection; + ServerResource serverResource; + Link link; + Long id; String _uuid; String _name; - Timer _timer = new Timer("Agent Timer"); - Timer certTimer; - Timer hostLBTimer; + ScheduledExecutorService selfTaskExecutor; + ScheduledExecutorService certExecutor; + ScheduledExecutorService hostLbCheckExecutor; - List _watchList = new ArrayList(); - long _sequence = 0; - long _lastPingResponseTime = 0; - long _pingInterval = 0; - AtomicInteger _inProgress = new AtomicInteger(); + CopyOnWriteArrayList> watchList = new CopyOnWriteArrayList<>(); + AtomicLong sequence = new AtomicLong(0); + AtomicLong lastPingResponseTime = new AtomicLong(0L); + long pingInterval = 0; + AtomicInteger commandsInProgress = new AtomicInteger(0); - StartupTask _startup = null; - long _startupWaitDefault = 180000; - long _startupWait = _startupWaitDefault; - boolean _reconnectAllowed = true; - //For time sentitive task, e.g. PingTask - ThreadPoolExecutor _ugentTaskPool; - ExecutorService _executor; + private final AtomicReference startupTask = new AtomicReference<>(); + private static final long DEFAULT_STARTUP_WAIT = 180; + long startupWait = DEFAULT_STARTUP_WAIT; + boolean reconnectAllowed = true; - Thread _shutdownThread = new ShutdownThread(this); + //For time sensitive task, e.g. 
PingTask + ThreadPoolExecutor outRequestHandler; + ExecutorService requestHandler; - private String _keystoreSetupPath; - private String _keystoreCertImportPath; + Thread shutdownThread = new ShutdownThread(this); - // for simulator use only + private String keystoreSetupSetupPath; + private String keystoreCertImportScriptPath; + + private String hostname; + + protected String getLinkLog(final Link link) { + if (link == null) { + return ""; + } + StringBuilder str = new StringBuilder(); + if (logger.isTraceEnabled()) { + str.append(System.identityHashCode(link)).append("-"); + } + str.append(link.getSocketAddress()); + return str.toString(); + } + + protected String getAgentName() { + return (serverResource != null && serverResource.isAppendAgentNameToLogs() && + StringUtils.isNotBlank(serverResource.getName())) ? + serverResource.getName() : + "Agent"; + } + + protected void setupShutdownHookAndInitExecutors() { + logger.trace("Adding shutdown hook"); + Runtime.getRuntime().addShutdownHook(shutdownThread); + selfTaskExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Agent-SelfTask")); + outRequestHandler = new ThreadPoolExecutor(shell.getPingRetries(), 2 * shell.getPingRetries(), 10, TimeUnit.MINUTES, + new SynchronousQueue<>(), new NamedThreadFactory("AgentOutRequest-Handler")); + requestHandler = new ThreadPoolExecutor(shell.getWorkers(), 5 * shell.getWorkers(), 1, TimeUnit.DAYS, + new LinkedBlockingQueue<>(), new NamedThreadFactory("AgentRequest-Handler")); + } + + /** + * Constructor for the {@code Agent} class, intended for simulator use only. + * + *

This constructor initializes the agent with a provided {@link IAgentShell}. + * It sets up the necessary NIO client connection, establishes a shutdown hook, + * and initializes the thread executors. + * + * @param shell the {@link IAgentShell} instance that provides agent configuration and runtime information. + */ public Agent(final IAgentShell shell) { - _shell = shell; - _link = null; - - _connection = new NioClient("Agent", _shell.getNextHost(), _shell.getPort(), _shell.getWorkers(), this); - - Runtime.getRuntime().addShutdownHook(_shutdownThread); - - _ugentTaskPool = - new ThreadPoolExecutor(shell.getPingRetries(), 2 * shell.getPingRetries(), 10, TimeUnit.MINUTES, new SynchronousQueue(), new NamedThreadFactory( - "UgentTask")); - - _executor = - new ThreadPoolExecutor(_shell.getWorkers(), 5 * _shell.getWorkers(), 1, TimeUnit.DAYS, new LinkedBlockingQueue(), new NamedThreadFactory( - "agentRequest-Handler")); + this.shell = shell; + this.link = null; + this.connection = new NioClient( + getAgentName(), + this.shell.getNextHost(), + this.shell.getPort(), + this.shell.getWorkers(), + this.shell.getSslHandshakeTimeout(), + this + ); + setupShutdownHookAndInitExecutors(); } public Agent(final IAgentShell shell, final int localAgentId, final ServerResource resource) throws ConfigurationException { - _shell = shell; - _resource = resource; - _link = null; - + this.shell = shell; + serverResource = resource; + link = null; resource.setAgentControl(this); - - final String value = _shell.getPersistentProperty(getResourceName(), "id"); - _uuid = _shell.getPersistentProperty(getResourceName(), "uuid"); - _name = _shell.getPersistentProperty(getResourceName(), "name"); - _id = value != null ? 
Long.parseLong(value) : null; - logger.info("Initialising agent [id: {}, uuid: {}, name: {}]", ObjectUtils.defaultIfNull(_id, ""), _uuid, _name); + final String value = shell.getPersistentProperty(getResourceName(), "id"); + _uuid = shell.getPersistentProperty(getResourceName(), "uuid"); + _name = shell.getPersistentProperty(getResourceName(), "name"); + id = value != null ? Long.parseLong(value) : null; + logger.info("Initialising agent [id: {}, uuid: {}, name: {}]", ObjectUtils.defaultIfNull(id, ""), _uuid, _name); final Map params = new HashMap<>(); - // merge with properties from command line to let resource access command line parameters - for (final Map.Entry cmdLineProp : _shell.getCmdLineProperties().entrySet()) { + for (final Map.Entry cmdLineProp : this.shell.getCmdLineProperties().entrySet()) { params.put(cmdLineProp.getKey(), cmdLineProp.getValue()); } - - if (!_resource.configure(getResourceName(), params)) { - throw new ConfigurationException("Unable to configure " + _resource.getName()); + if (!serverResource.configure(getResourceName(), params)) { + throw new ConfigurationException("Unable to configure " + serverResource.getName()); } + ThreadContext.put("agentname", getAgentName()); + final String host = this.shell.getNextHost(); + connection = new NioClient(getAgentName(), host, this.shell.getPort(), this.shell.getWorkers(), + this.shell.getSslHandshakeTimeout(), this); + setupShutdownHookAndInitExecutors(); + logger.info("{} with host = {}, local id = {}", this, host, localAgentId); + } - final String host = _shell.getNextHost(); - _connection = new NioClient("Agent", host, _shell.getPort(), _shell.getWorkers(), this); - // ((NioClient)_connection).setBindAddress(_shell.getPrivateIp()); - - logger.debug("Adding shutdown hook"); - Runtime.getRuntime().addShutdownHook(_shutdownThread); - - _ugentTaskPool = - new ThreadPoolExecutor(shell.getPingRetries(), 2 * shell.getPingRetries(), 10, TimeUnit.MINUTES, new SynchronousQueue(), new 
NamedThreadFactory( - "UgentTask")); - - _executor = - new ThreadPoolExecutor(_shell.getWorkers(), 5 * _shell.getWorkers(), 1, TimeUnit.DAYS, new LinkedBlockingQueue(), new NamedThreadFactory( - "agentRequest-Handler")); - - logger.info("Agent [id = {}, uuid: {}, name: {}] : type = {} : zone = {} : pod = {} : workers = {} : host = {} : port = {}", - ObjectUtils.defaultIfNull(_id, "new"), _uuid, _name, getResourceName(), - _shell.getZone(), _shell.getPod(), _shell.getWorkers(), host, _shell.getPort()); + @Override + public String toString() { + return String.format("Agent [id = %s, uuid = %s, name = %s, type = %s, zone = %s, pod = %s, workers = %d, port = %d]", + ObjectUtils.defaultIfNull(id, "new"), + _uuid, + _name, + getResourceName(), + this.shell.getZone(), + this.shell.getPod(), + this.shell.getWorkers(), + this.shell.getPort()); } public String getVersion() { - return _shell.getVersion(); + return shell.getVersion(); } public String getResourceGuid() { - final String guid = _shell.getGuid(); + final String guid = shell.getGuid(); return guid + "-" + getResourceName(); } public String getZone() { - return _shell.getZone(); + return shell.getZone(); } public String getPod() { - return _shell.getPod(); + return shell.getPod(); } protected void setLink(final Link link) { - _link = link; + this.link = link; } public ServerResource getResource() { - return _resource; - } - - public BackoffAlgorithm getBackoffAlgorithm() { - return _shell.getBackoffAlgorithm(); + return serverResource; } public String getResourceName() { - return _resource.getClass().getSimpleName(); + return serverResource.getClass().getSimpleName(); } /** @@ -260,71 +297,64 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater * agent instances and its inner objects. 
*/ private void scavengeOldAgentObjects() { - _executor.submit(new Runnable() { - @Override - public void run() { - try { - Thread.sleep(2000L); - } catch (final InterruptedException ignored) { - } finally { - System.gc(); - } + requestHandler.submit(() -> { + try { + Thread.sleep(2000L); + } catch (final InterruptedException ignored) { + } finally { + System.gc(); } }); } public void start() { - if (!_resource.start()) { - logger.error("Unable to start the resource: {}", _resource.getName()); - throw new CloudRuntimeException("Unable to start the resource: " + _resource.getName()); + if (!serverResource.start()) { + String msg = String.format("Unable to start the resource: %s", serverResource.getName()); + logger.error(msg); + throw new CloudRuntimeException(msg); } - _keystoreSetupPath = Script.findScript("scripts/util/", KeyStoreUtils.KS_SETUP_SCRIPT); - if (_keystoreSetupPath == null) { + keystoreSetupSetupPath = Script.findScript("scripts/util/", KeyStoreUtils.KS_SETUP_SCRIPT); + if (keystoreSetupSetupPath == null) { throw new CloudRuntimeException(String.format("Unable to find the '%s' script", KeyStoreUtils.KS_SETUP_SCRIPT)); } - _keystoreCertImportPath = Script.findScript("scripts/util/", KeyStoreUtils.KS_IMPORT_SCRIPT); - if (_keystoreCertImportPath == null) { + keystoreCertImportScriptPath = Script.findScript("scripts/util/", KeyStoreUtils.KS_IMPORT_SCRIPT); + if (keystoreCertImportScriptPath == null) { throw new CloudRuntimeException(String.format("Unable to find the '%s' script", KeyStoreUtils.KS_IMPORT_SCRIPT)); } try { - _connection.start(); + connection.start(); } catch (final NioConnectionException e) { logger.warn("Attempt to connect to server generated NIO Connection Exception {}, trying again", e.getLocalizedMessage()); } - while (!_connection.isStartup()) { - final String host = _shell.getNextHost(); - _shell.getBackoffAlgorithm().waitBeforeRetry(); - _connection = new NioClient("Agent", host, _shell.getPort(), _shell.getWorkers(), this); - 
logger.info("Connecting to host:{}", host); + while (!connection.isStartup()) { + final String host = shell.getNextHost(); + shell.getBackoffAlgorithm().waitBeforeRetry(); + connection = new NioClient(getAgentName(), host, shell.getPort(), shell.getWorkers(), + shell.getSslHandshakeTimeout(), this); + logger.info("Connecting to host: {}", host); try { - _connection.start(); + connection.start(); } catch (final NioConnectionException e) { - _connection.stop(); - try { - _connection.cleanUp(); - } catch (final IOException ex) { - logger.warn("Fail to clean up old connection. {}", ex); - } + stopAndCleanupConnection(false); logger.info("Attempted to connect to the server, but received an unexpected exception, trying again...", e); } } - _shell.updateConnectedHost(); + shell.updateConnectedHost(); scavengeOldAgentObjects(); - } public void stop(final String reason, final String detail) { - logger.info("Stopping the agent: Reason = {} {}", reason, ": Detail = " + ObjectUtils.defaultIfNull(detail, "")); - _reconnectAllowed = false; - if (_connection != null) { + logger.info("Stopping the agent: Reason = {}{}", reason, (detail != null ? ": Detail = " + detail : "")); + reconnectAllowed = false; + if (connection != null) { final ShutdownCommand cmd = new ShutdownCommand(reason, detail); try { - if (_link != null) { - final Request req = new Request(_id != null ? _id : -1, -1, cmd, false); - _link.send(req.toBytes()); + if (link != null) { + final Request req = new Request(id != null ? 
id : -1, -1, cmd, false); + link.send(req.toBytes()); } } catch (final ClosedChannelException e) { logger.warn("Unable to send: {}", cmd.toString()); @@ -337,53 +367,54 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } catch (final InterruptedException e) { logger.debug("Who the heck interrupted me here?"); } - _connection.stop(); - _connection = null; - _link = null; + connection.stop(); + connection = null; + link = null; } - if (_resource != null) { - _resource.stop(); - _resource = null; + if (serverResource != null) { + serverResource.stop(); + serverResource = null; } - if (_startup != null) { - _startup = null; + if (startupTask.get() != null) { + startupTask.set(null); } - if (_ugentTaskPool != null) { - _ugentTaskPool.shutdownNow(); - _ugentTaskPool = null; + if (outRequestHandler != null) { + outRequestHandler.shutdownNow(); + outRequestHandler = null; } - if (_executor != null) { - _executor.shutdown(); - _executor = null; + if (requestHandler != null) { + requestHandler.shutdown(); + requestHandler = null; } - if (_timer != null) { - _timer.cancel(); - _timer = null; + if (selfTaskExecutor != null) { + selfTaskExecutor.shutdown(); + selfTaskExecutor = null; } - if (hostLBTimer != null) { - hostLBTimer.cancel(); - hostLBTimer = null; + if (hostLbCheckExecutor != null) { + hostLbCheckExecutor.shutdown(); + hostLbCheckExecutor = null; } - if (certTimer != null) { - certTimer.cancel(); - certTimer = null; + if (certExecutor != null) { + certExecutor.shutdown(); + certExecutor = null; } } public Long getId() { - return _id; + return id; } public void setId(final Long id) { - _id = id; - _shell.setPersistentProperty(getResourceName(), "id", Long.toString(id)); + logger.debug("Set agent id {}", id); + this.id = id; + shell.setPersistentProperty(getResourceName(), "id", Long.toString(id)); } public String getUuid() { @@ -392,7 +423,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater public void 
setUuid(String uuid) { this._uuid = uuid; - _shell.setPersistentProperty(getResourceName(), "uuid", uuid); + shell.setPersistentProperty(getResourceName(), "uuid", uuid); } public String getName() { @@ -401,61 +432,75 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater public void setName(String name) { this._name = name; - _shell.setPersistentProperty(getResourceName(), "name", name); + shell.setPersistentProperty(getResourceName(), "name", name); } - private synchronized void scheduleServicesRestartTask() { - if (certTimer != null) { - certTimer.cancel(); - certTimer.purge(); + private void scheduleCertificateRenewalTask() { + String name = "CertificateRenewalTask"; + if (certExecutor != null && !certExecutor.isShutdown()) { + certExecutor.shutdown(); + try { + if (!certExecutor.awaitTermination(1, TimeUnit.SECONDS)) { + certExecutor.shutdownNow(); + } + } catch (InterruptedException e) { + logger.debug("Forcing {} shutdown as it did not shutdown in the desired time due to: {}", + name, e.getMessage()); + certExecutor.shutdownNow(); + } } - certTimer = new Timer("Certificate Renewal Timer"); - certTimer.schedule(new PostCertificateRenewalTask(this), 5000L); + certExecutor = Executors.newSingleThreadScheduledExecutor((new NamedThreadFactory(name))); + certExecutor.schedule(new PostCertificateRenewalTask(this), 5, TimeUnit.SECONDS); } - private synchronized void scheduleHostLBCheckerTask(final long checkInterval) { - if (hostLBTimer != null) { - hostLBTimer.cancel(); + private void scheduleHostLBCheckerTask(final long checkInterval) { + String name = "HostLBCheckerTask"; + if (hostLbCheckExecutor != null && !hostLbCheckExecutor.isShutdown()) { + hostLbCheckExecutor.shutdown(); + try { + if (!hostLbCheckExecutor.awaitTermination(1, TimeUnit.SECONDS)) { + hostLbCheckExecutor.shutdownNow(); + } + } catch (InterruptedException e) { + logger.debug("Forcing {} shutdown as it did not shutdown in the desired time due to: {}", + name, 
e.getMessage()); + hostLbCheckExecutor.shutdownNow(); + } } if (checkInterval > 0L) { - logger.info("Scheduling preferred host timer task with host.lb.interval={}ms", checkInterval); - hostLBTimer = new Timer("Host LB Timer"); - hostLBTimer.scheduleAtFixedRate(new PreferredHostCheckerTask(), checkInterval, checkInterval); + logger.info("Scheduling preferred host task with host.lb.interval={}ms", checkInterval); + hostLbCheckExecutor = Executors.newSingleThreadScheduledExecutor((new NamedThreadFactory(name))); + hostLbCheckExecutor.scheduleAtFixedRate(new PreferredHostCheckerTask(), checkInterval, checkInterval, + TimeUnit.MILLISECONDS); } } public void scheduleWatch(final Link link, final Request request, final long delay, final long period) { - synchronized (_watchList) { - logger.debug("Adding task with request: {} to watch list", request.toString()); - - final WatchTask task = new WatchTask(link, request, this); - _timer.schedule(task, 0, period); - _watchList.add(task); - } + logger.debug("Adding a watch list"); + final WatchTask task = new WatchTask(link, request, this); + final ScheduledFuture future = selfTaskExecutor.scheduleAtFixedRate(task, delay, period, TimeUnit.MILLISECONDS); + watchList.add(future); } public void triggerUpdate() { - PingCommand command = _resource.getCurrentStatus(getId()); + PingCommand command = serverResource.getCurrentStatus(getId()); command.setOutOfBand(true); logger.debug("Sending out of band ping"); - - final Request request = new Request(_id, -1, command, false); + final Request request = new Request(id, -1, command, false); request.setSequence(getNextSequence()); try { - _link.send(request.toBytes()); + link.send(request.toBytes()); } catch (final ClosedChannelException e) { logger.warn("Unable to send ping update: {}", request.toString()); } } protected void cancelTasks() { - synchronized (_watchList) { - for (final WatchTask task : _watchList) { - task.cancel(); - } - logger.debug("Clearing {} tasks of watch list", 
_watchList.size()); - _watchList.clear(); + for (final ScheduledFuture task : watchList) { + task.cancel(true); } + logger.debug("Clearing watch list: {}", () -> watchList.size()); + watchList.clear(); } /** @@ -466,27 +511,52 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater * when host is added back */ protected void cleanupAgentZoneProperties() { - _shell.setPersistentProperty(null, "zone", ""); - _shell.setPersistentProperty(null, "cluster", ""); - _shell.setPersistentProperty(null, "pod", ""); + shell.setPersistentProperty(null, "zone", ""); + shell.setPersistentProperty(null, "cluster", ""); + shell.setPersistentProperty(null, "pod", ""); } - public synchronized void lockStartupTask(final Link link) { - _startup = new StartupTask(link); - _timer.schedule(_startup, _startupWait); + public void lockStartupTask(final Link link) { + logger.debug("Creating startup task for link: {}", () -> getLinkLog(link)); + StartupTask currentTask = startupTask.get(); + if (currentTask != null) { + logger.warn("A Startup task is already locked or in progress, cannot create for link {}", + getLinkLog(link)); + return; + } + currentTask = new StartupTask(link); + if (startupTask.compareAndSet(null, currentTask)) { + selfTaskExecutor.schedule(currentTask, startupWait, TimeUnit.SECONDS); + return; + } + logger.warn("Failed to lock a StartupTask for link: {}", getLinkLog(link)); + } + + protected boolean cancelStartupTask() { + StartupTask task = startupTask.getAndSet(null); + if (task != null) { + task.cancel(); + return true; + } + return false; } public void sendStartup(final Link link) { - final StartupCommand[] startup = _resource.initialize(); + sendStartup(link, false); + } + + public void sendStartup(final Link link, boolean transfer) { + final StartupCommand[] startup = serverResource.initialize(); if (startup != null) { - final String msHostList = _shell.getPersistentProperty(null, "host"); + final String msHostList = 
shell.getPersistentProperty(null, "host"); final Command[] commands = new Command[startup.length]; for (int i = 0; i < startup.length; i++) { setupStartupCommand(startup[i]); startup[i].setMSHostList(msHostList); + startup[i].setConnectionTransferred(transfer); commands[i] = startup[i]; } - final Request request = new Request(_id != null ? _id : -1, -1, commands, false, false); + final Request request = new Request(id != null ? id : -1, -1, commands, false, false); request.setSequence(getNextSequence()); logger.debug("Sending Startup: {}", request.toString()); @@ -494,31 +564,37 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater try { link.send(request.toBytes()); } catch (final ClosedChannelException e) { - logger.warn("Unable to send request: {}", request.toString()); + logger.warn("Unable to send request to {} due to '{}', request: {}", + getLinkLog(link), e.getMessage(), request); } - if (_resource instanceof ResourceStatusUpdater) { - ((ResourceStatusUpdater) _resource).registerStatusUpdater(this); + if (serverResource instanceof ResourceStatusUpdater) { + ((ResourceStatusUpdater) serverResource).registerStatusUpdater(this); } } } - protected void setupStartupCommand(final StartupCommand startup) { - InetAddress addr; + protected String retrieveHostname() { + logger.trace("Retrieving hostname with resource={}", () -> serverResource.getClass().getSimpleName()); + final String result = Script.runSimpleBashScript(Script.getExecutableAbsolutePath("hostname"), 500); + if (StringUtils.isNotBlank(result)) { + return result; + } try { - addr = InetAddress.getLocalHost(); + InetAddress address = InetAddress.getLocalHost(); + return address.toString(); } catch (final UnknownHostException e) { logger.warn("unknown host? 
", e); throw new CloudRuntimeException("Cannot get local IP address"); } + } - final Script command = new Script("hostname", 500, logger); - final OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); - final String result = command.execute(parser); - final String hostname = result == null ? parser.getLine() : addr.toString(); - + protected void setupStartupCommand(final StartupCommand startup) { startup.setId(getId()); - if (startup.getName() == null) { + if (StringUtils.isBlank(startup.getName())) { + if (StringUtils.isBlank(hostname)) { + hostname = retrieveHostname(); + } startup.setName(hostname); } startup.setDataCenter(getZone()); @@ -541,77 +617,82 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } protected void reconnect(final Link link) { - if (!_reconnectAllowed) { + reconnect(link, null, null, false); + } + + protected void reconnect(final Link link, String preferredHost, List avoidHostList, boolean forTransfer) { + if (!(forTransfer || reconnectAllowed)) { return; } - synchronized (this) { - if (_startup != null) { - _startup.cancel(); - _startup = null; - } - } - if (link != null) { - link.close(); - link.terminated(); + if (!reconnectAllowed) { + logger.debug("Reconnect requested but it is not allowed {}", () -> getLinkLog(link)); + return; } - + cancelStartupTask(); + closeAndTerminateLink(link); + closeAndTerminateLink(this.link); setLink(null); cancelTasks(); + serverResource.disconnected(); + logger.info("Lost connection to host: {}. 
Attempting reconnection while we still have {} commands in progress.", shell.getConnectedHost(), commandsInProgress.get()); + stopAndCleanupConnection(true); + do { + final String host = shell.getNextHost(); + connection = new NioClient(getAgentName(), host, shell.getPort(), shell.getWorkers(), shell.getSslHandshakeTimeout(), this); + logger.info("Reconnecting to host: {}", host); + try { + connection.start(); + } catch (final NioConnectionException e) { + logger.info("Attempted to re-connect to the server, but received an unexpected exception, trying again...", e); + stopAndCleanupConnection(false); + } + shell.getBackoffAlgorithm().waitBeforeRetry(); + } while (!connection.isStartup()); + shell.updateConnectedHost(); + logger.info("Connected to the host: {}", shell.getConnectedHost()); + } - _resource.disconnected(); - - logger.info("Lost connection to host: {}. Attempting reconnection while we still have {} commands in progress.", _shell.getConnectedHost(), _inProgress.get()); - - _connection.stop(); + protected void closeAndTerminateLink(final Link link) { + if (link == null) { + return; + } + link.close(); + link.terminated(); + } + protected void stopAndCleanupConnection(boolean waitForStop) { + if (connection == null) { + return; + } + connection.stop(); try { - _connection.cleanUp(); + connection.cleanUp(); } catch (final IOException e) { logger.warn("Fail to clean up old connection. 
{}", e); } - - while (_connection.isStartup()) { - _shell.getBackoffAlgorithm().waitBeforeRetry(); + if (!waitForStop) { + return; } - do { - final String host = _shell.getNextHost(); - _connection = new NioClient("Agent", host, _shell.getPort(), _shell.getWorkers(), this); - logger.info("Reconnecting to host:{}", host); - try { - _connection.start(); - } catch (final NioConnectionException e) { - logger.info("Attempted to re-connect to the server, but received an unexpected exception, trying again...", e); - _connection.stop(); - try { - _connection.cleanUp(); - } catch (final IOException ex) { - logger.warn("Fail to clean up old connection. {}", ex); - } - } - _shell.getBackoffAlgorithm().waitBeforeRetry(); - } while (!_connection.isStartup()); - _shell.updateConnectedHost(); - logger.info("Connected to the host: {}", _shell.getConnectedHost()); + shell.getBackoffAlgorithm().waitBeforeRetry(); + } while (connection.isStartup()); } public void processStartupAnswer(final Answer answer, final Response response, final Link link) { - boolean cancelled = false; - synchronized (this) { - if (_startup != null) { - _startup.cancel(); - _startup = null; - } else { - cancelled = true; - } - } + boolean answerValid = cancelStartupTask(); final StartupAnswer startup = (StartupAnswer)answer; if (!startup.getResult()) { logger.error("Not allowed to connect to the server: {}", answer.getDetails()); + if (serverResource != null && !serverResource.isExitOnFailures()) { + logger.trace("{} does not allow exit on failure, reconnecting", + serverResource.getClass().getSimpleName()); + reconnect(link); + return; + } System.exit(1); } - if (cancelled) { + if (!answerValid) { logger.warn("Threw away a startup answer because we're reconnecting."); return; } @@ -622,12 +703,12 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater setId(startup.getHostId()); setUuid(startup.getHostUuid()); setName(startup.getHostName()); - _pingInterval = 
(long)startup.getPingInterval() * 1000; // change to ms. + pingInterval = startup.getPingInterval() * 1000L; // change to ms. - setLastPingResponseTime(); - scheduleWatch(link, response, _pingInterval, _pingInterval); + updateLastPingResponseTime(); + scheduleWatch(link, response, pingInterval, pingInterval); - _ugentTaskPool.setKeepAliveTime(2 * _pingInterval, TimeUnit.MILLISECONDS); + outRequestHandler.setKeepAliveTime(2 * pingInterval, TimeUnit.MILLISECONDS); logger.info("Startup Response Received: agent [id: {}, uuid: {}, name: {}]", startup.getHostId(), startup.getHostUuid(), startup.getHostName()); @@ -661,7 +742,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater if (cmd instanceof CronCommand) { final CronCommand watch = (CronCommand)cmd; - scheduleWatch(link, request, (long)watch.getInterval() * 1000, watch.getInterval() * 1000); + scheduleWatch(link, request, watch.getInterval() * 1000L, watch.getInterval() * 1000L); answer = new Answer(cmd, true, null); } else if (cmd instanceof ShutdownCommand) { final ShutdownCommand shutdown = (ShutdownCommand)cmd; @@ -670,10 +751,17 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater if (shutdown.isRemoveHost()) { cleanupAgentZoneProperties(); } - _reconnectAllowed = false; + reconnectAllowed = false; answer = new Answer(cmd, true, null); } else if (cmd instanceof ReadyCommand && ((ReadyCommand)cmd).getDetails() != null) { + logger.debug("Not ready to connect to mgt server: {}", ((ReadyCommand)cmd).getDetails()); + if (serverResource != null && !serverResource.isExitOnFailures()) { + logger.trace("{} does not allow exit on failure, reconnecting", + serverResource.getClass().getSimpleName()); + reconnect(link); + return; + } System.exit(1); return; } else if (cmd instanceof MaintainCommand) { @@ -681,12 +769,10 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater answer = new MaintainAnswer((MaintainCommand)cmd); } else if 
(cmd instanceof AgentControlCommand) { answer = null; - synchronized (_controlListeners) { - for (final IAgentControlListener listener : _controlListeners) { - answer = listener.processControlRequest(request, (AgentControlCommand)cmd); - if (answer != null) { - break; - } + for (final IAgentControlListener listener : controlListeners) { + answer = listener.processControlRequest(request, (AgentControlCommand)cmd); + if (answer != null) { + break; } } @@ -698,20 +784,22 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater answer = setupAgentKeystore((SetupKeyStoreCommand) cmd); } else if (cmd instanceof SetupCertificateCommand && ((SetupCertificateCommand) cmd).isHandleByAgent()) { answer = setupAgentCertificate((SetupCertificateCommand) cmd); - if (Host.Type.Routing.equals(_resource.getType())) { - scheduleServicesRestartTask(); + if (Host.Type.Routing.equals(serverResource.getType())) { + scheduleCertificateRenewalTask(); } } else if (cmd instanceof SetupMSListCommand) { answer = setupManagementServerList((SetupMSListCommand) cmd); + } else if (cmd instanceof MigrateAgentConnectionCommand) { + answer = migrateAgentToOtherMS((MigrateAgentConnectionCommand) cmd); } else { if (cmd instanceof ReadyCommand) { processReadyCommand(cmd); } - _inProgress.incrementAndGet(); + commandsInProgress.incrementAndGet(); try { - answer = _resource.executeRequest(cmd); + answer = serverResource.executeRequest(cmd); } finally { - _inProgress.decrementAndGet(); + commandsInProgress.decrementAndGet(); } if (answer == null) { logger.debug("Response: unsupported command {}", cmd.toString()); @@ -765,13 +853,13 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater final String keyStoreFile = agentFile.getParent() + "/" + KeyStoreUtils.KS_FILENAME; final String csrFile = agentFile.getParent() + "/" + KeyStoreUtils.CSR_FILENAME; - String storedPassword = _shell.getPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY); + 
String storedPassword = shell.getPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY); if (StringUtils.isEmpty(storedPassword)) { storedPassword = keyStorePassword; - _shell.setPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY, storedPassword); + shell.setPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY, storedPassword); } - Script script = new Script(_keystoreSetupPath, 300000, logger); + Script script = new Script(keystoreSetupSetupPath, 300000, logger); script.add(agentFile.getAbsolutePath()); script.add(keyStoreFile); script.add(storedPassword); @@ -815,8 +903,8 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater throw new CloudRuntimeException("Unable to save received agent client and ca certificates", e); } - String ksPassphrase = _shell.getPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY); - Script script = new Script(_keystoreCertImportPath, 300000, logger); + String ksPassphrase = shell.getPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY); + Script script = new Script(keystoreCertImportScriptPath, 300000, logger); script.add(agentFile.getAbsolutePath()); script.add(ksPassphrase); script.add(keyStoreFile); @@ -838,9 +926,9 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater if (CollectionUtils.isNotEmpty(msList) && StringUtils.isNotEmpty(lbAlgorithm)) { try { final String newMSHosts = String.format("%s%s%s", com.cloud.utils.StringUtils.toCSVList(msList), IAgentShell.hostLbAlgorithmSeparator, lbAlgorithm); - _shell.setPersistentProperty(null, "host", newMSHosts); - _shell.setHosts(newMSHosts); - _shell.resetHostCounter(); + shell.setPersistentProperty(null, "host", newMSHosts); + shell.setHosts(newMSHosts); + shell.resetHostCounter(); logger.info("Processed new management server list: {}", newMSHosts); } catch (final Exception e) { throw new CloudRuntimeException("Could not persist received management servers list", e); @@ -849,7 +937,7 
@@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater if ("shuffle".equals(lbAlgorithm)) { scheduleHostLBCheckerTask(0); } else { - scheduleHostLBCheckerTask(_shell.getLbCheckerInterval(lbCheckInterval)); + scheduleHostLBCheckerTask(shell.getLbCheckerInterval(lbCheckInterval)); } } @@ -858,6 +946,53 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater return new SetupMSListAnswer(true); } + private Answer migrateAgentToOtherMS(final MigrateAgentConnectionCommand cmd) { + try { + if (CollectionUtils.isNotEmpty(cmd.getMsList())) { + processManagementServerList(cmd.getMsList(), cmd.getLbAlgorithm(), cmd.getLbCheckInterval()); + } + migrateAgentConnection(cmd.getAvoidMsList()); + } catch (Exception e) { + String errMsg = "Migrate agent connection failed, due to " + e.getMessage(); + logger.debug(errMsg, e); + return new MigrateAgentConnectionAnswer(errMsg); + } + return new MigrateAgentConnectionAnswer(true); + } + + private void migrateAgentConnection(List avoidMsList) { + final String[] msHosts = shell.getHosts(); + if (msHosts == null || msHosts.length < 1) { + throw new CloudRuntimeException("Management Server hosts empty, not properly configured in agent"); + } + + List msHostsList = new ArrayList<>(Arrays.asList(msHosts)); + msHostsList.removeAll(avoidMsList); + if (msHostsList.isEmpty() || StringUtils.isEmpty(msHostsList.get(0))) { + throw new CloudRuntimeException("No other Management Server hosts to migrate"); + } + + String preferredHost = null; + for (String msHost : msHostsList) { + try (final Socket socket = new Socket()) { + socket.connect(new InetSocketAddress(msHost, shell.getPort()), 5000); + preferredHost = msHost; + break; + } catch (final IOException e) { + throw new CloudRuntimeException("Management server host: " + msHost + " is not reachable, to migrate connection"); + } + } + + if (preferredHost == null) { + throw new CloudRuntimeException("Management server host(s) are not reachable, to 
migrate connection"); + } + + logger.debug("Management server host " + preferredHost + " is found to be reachable, trying to reconnect"); + shell.resetHostCounter(); + shell.setConnectionTransfer(true); + reconnect(link, preferredHost, avoidMsList, true); + } + public void processResponse(final Response response, final Link link) { final Answer answer = response.getAnswer(); logger.debug("Received response: {}", response.toString()); @@ -865,16 +1000,14 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater processStartupAnswer(answer, response, link); } else if (answer instanceof AgentControlAnswer) { // Notice, we are doing callback while holding a lock! - synchronized (_controlListeners) { - for (final IAgentControlListener listener : _controlListeners) { - listener.processControlResponse(response, (AgentControlAnswer)answer); - } + for (final IAgentControlListener listener : controlListeners) { + listener.processControlResponse(response, (AgentControlAnswer)answer); } - } else if (answer instanceof PingAnswer && (((PingAnswer) answer).isSendStartup()) && _reconnectAllowed) { + } else if (answer instanceof PingAnswer && (((PingAnswer) answer).isSendStartup()) && reconnectAllowed) { logger.info("Management server requested startup command to reinitialize the agent"); sendStartup(link); } else { - setLastPingResponseTime(); + updateLastPingResponseTime(); } } @@ -911,22 +1044,24 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater public void processOtherTask(final Task task) { final Object obj = task.get(); if (obj instanceof Response) { - if (System.currentTimeMillis() - _lastPingResponseTime > _pingInterval * _shell.getPingRetries()) { - logger.error("Ping Interval has gone past {}. 
Won't reconnect to mgt server, as connection is still alive", _pingInterval * _shell.getPingRetries()); + if (System.currentTimeMillis() - lastPingResponseTime.get() > pingInterval * shell.getPingRetries()) { + logger.error("Ping Interval has gone past {}. Won't reconnect to mgt server, as connection is still alive", + pingInterval * shell.getPingRetries()); return; } - final PingCommand ping = _resource.getCurrentStatus(getId()); - final Request request = new Request(_id, -1, ping, false); + final PingCommand ping = serverResource.getCurrentStatus(getId()); + final Request request = new Request(id, -1, ping, false); request.setSequence(getNextSequence()); logger.debug("Sending ping: {}", request.toString()); try { task.getLink().send(request.toBytes()); //if i can send pingcommand out, means the link is ok - setLastPingResponseTime(); + updateLastPingResponseTime(); } catch (final ClosedChannelException e) { - logger.warn("Unable to send request: {}", request.toString()); + logger.warn("Unable to send request to {} due to '{}', request: {}", + getLinkLog(task.getLink()), e.getMessage(), request); } } else if (obj instanceof Request) { @@ -936,11 +1071,11 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater ThreadContext.put("logcontextid", command.getContextParam("logid")); } Answer answer = null; - _inProgress.incrementAndGet(); + commandsInProgress.incrementAndGet(); try { - answer = _resource.executeRequest(command); + answer = serverResource.executeRequest(command); } finally { - _inProgress.decrementAndGet(); + commandsInProgress.decrementAndGet(); } if (answer != null) { final Response response = new Response(req, answer); @@ -957,35 +1092,29 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } } - public synchronized void setLastPingResponseTime() { - _lastPingResponseTime = System.currentTimeMillis(); + public void updateLastPingResponseTime() { + 
lastPingResponseTime.set(System.currentTimeMillis()); } - protected synchronized long getNextSequence() { - return _sequence++; + protected long getNextSequence() { + return sequence.getAndIncrement(); } @Override public void registerControlListener(final IAgentControlListener listener) { - synchronized (_controlListeners) { - _controlListeners.add(listener); - } + controlListeners.add(listener); } @Override public void unregisterControlListener(final IAgentControlListener listener) { - synchronized (_controlListeners) { - _controlListeners.remove(listener); - } + controlListeners.remove(listener); } @Override public AgentControlAnswer sendRequest(final AgentControlCommand cmd, final int timeoutInMilliseconds) throws AgentControlChannelException { final Request request = new Request(getId(), -1, new Command[] {cmd}, true, false); request.setSequence(getNextSequence()); - final AgentControlListener listener = new AgentControlListener(request); - registerControlListener(listener); try { postRequest(request); @@ -996,7 +1125,6 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater logger.warn("sendRequest is interrupted, exit waiting"); } } - return listener.getAnswer(); } finally { unregisterControlListener(listener); @@ -1011,9 +1139,9 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } private void postRequest(final Request request) throws AgentControlChannelException { - if (_link != null) { + if (link != null) { try { - _link.send(request.toBytes()); + link.send(request.toBytes()); } catch (final ClosedChannelException e) { logger.warn("Unable to post agent control request: {}", request.toString()); throw new AgentControlChannelException("Unable to post agent control request due to " + e.getMessage()); @@ -1065,26 +1193,26 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } } - public class WatchTask extends ManagedContextTimerTask { + public class WatchTask implements 
Runnable { protected Request _request; protected Agent _agent; - protected Link _link; + protected Link link; public WatchTask(final Link link, final Request request, final Agent agent) { super(); _request = request; - _link = link; + this.link = link; _agent = agent; } @Override - protected void runInContext() { + public void run() { logger.trace("Scheduling {}", (_request instanceof Response ? "Ping" : "Watch Task")); try { if (_request instanceof Response) { - _ugentTaskPool.submit(new ServerHandler(Task.Type.OTHER, _link, _request)); + outRequestHandler.submit(new ServerHandler(Task.Type.OTHER, link, _request)); } else { - _link.schedule(new ServerHandler(Task.Type.OTHER, _link, _request)); + link.schedule(new ServerHandler(Task.Type.OTHER, link, _request)); } } catch (final ClosedChannelException e) { logger.warn("Unable to schedule task because channel is closed"); @@ -1092,35 +1220,32 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } } - public class StartupTask extends ManagedContextTimerTask { - protected Link _link; - protected volatile boolean cancelled = false; + public class StartupTask implements Runnable { + protected Link link; + private final AtomicBoolean cancelled = new AtomicBoolean(false); public StartupTask(final Link link) { logger.debug("Startup task created"); - _link = link; + this.link = link; } - @Override - public synchronized boolean cancel() { + public boolean cancel() { // TimerTask.cancel may fail depends on the calling context - if (!cancelled) { - cancelled = true; - _startupWait = _startupWaitDefault; + if (cancelled.compareAndSet(false, true)) { + startupWait = DEFAULT_STARTUP_WAIT; logger.debug("Startup task cancelled"); - return super.cancel(); } return true; } @Override - protected synchronized void runInContext() { - if (!cancelled) { - logger.info("The startup command is now cancelled"); - cancelled = true; - _startup = null; - _startupWait = _startupWaitDefault * 2; - reconnect(_link); + 
public void run() { + if (cancelled.compareAndSet(false, true)) { + logger.info("The running startup command is now invalid. Attempting reconnect"); + startupTask.set(null); + startupWait = DEFAULT_STARTUP_WAIT * 2; + logger.debug("Executing reconnect from task - {}", () -> getLinkLog(link)); + reconnect(link); } } } @@ -1151,9 +1276,10 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater @Override public void doTask(final Task task) throws TaskExecutionException { if (task.getType() == Task.Type.CONNECT) { - _shell.getBackoffAlgorithm().reset(); + shell.getBackoffAlgorithm().reset(); setLink(task.getLink()); - sendStartup(task.getLink()); + sendStartup(task.getLink(), shell.isConnectionTransfer()); + shell.setConnectionTransfer(false); } else if (task.getType() == Task.Type.DATA) { Request request; try { @@ -1164,7 +1290,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } else { //put the requests from mgt server into another thread pool, as the request may take a longer time to finish. 
Don't block the NIO main thread pool //processRequest(request, task.getLink()); - _executor.submit(new AgentRequestHandler(getType(), getLink(), request)); + requestHandler.submit(new AgentRequestHandler(getType(), getLink(), request)); } } catch (final ClassNotFoundException e) { logger.error("Unable to find this request "); @@ -1178,8 +1304,9 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater Thread.sleep(5000); } catch (InterruptedException e) { } + shell.setConnectionTransfer(false); + logger.debug("Executing disconnect task - {}", () -> getLinkLog(task.getLink())); reconnect(task.getLink()); - return; } else if (task.getType() == Task.Type.OTHER) { processOtherTask(task); } @@ -1202,26 +1329,26 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater protected void runInContext() { while (true) { try { - if (_inProgress.get() == 0) { + if (commandsInProgress.get() == 0) { logger.debug("Running post certificate renewal task to restart services."); // Let the resource perform any post certificate renewal cleanups - _resource.executeRequest(new PostCertificateRenewalCommand()); + serverResource.executeRequest(new PostCertificateRenewalCommand()); - IAgentShell shell = agent._shell; - ServerResource resource = agent._resource.getClass().newInstance(); + IAgentShell shell = agent.shell; + ServerResource resource = agent.serverResource.getClass().getDeclaredConstructor().newInstance(); // Stop current agent agent.cancelTasks(); - agent._reconnectAllowed = false; - Runtime.getRuntime().removeShutdownHook(agent._shutdownThread); + agent.reconnectAllowed = false; + Runtime.getRuntime().removeShutdownHook(agent.shutdownThread); agent.stop(ShutdownCommand.Requested, "Restarting due to new X509 certificates"); // Nullify references for GC - agent._shell = null; - agent._watchList = null; - agent._shutdownThread = null; - agent._controlListeners = null; + agent.shell = null; + agent.watchList = null; + 
agent.shutdownThread = null; + agent.controlListeners = null; agent = null; // Start a new agent instance @@ -1229,7 +1356,6 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater return; } logger.debug("Other tasks are in progress, will retry post certificate renewal command after few seconds"); - Thread.sleep(5000); } catch (final Exception e) { logger.warn("Failed to execute post certificate renewal command:", e); @@ -1244,35 +1370,34 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater @Override protected void runInContext() { try { - final String[] msList = _shell.getHosts(); + final String[] msList = shell.getHosts(); if (msList == null || msList.length < 1) { return; } final String preferredHost = msList[0]; - final String connectedHost = _shell.getConnectedHost(); - logger.trace("Running preferred host checker task, connected host={}, preferred host={}", connectedHost, preferredHost); - - if (preferredHost != null && !preferredHost.equals(connectedHost) && _link != null) { - boolean isHostUp = true; - try (final Socket socket = new Socket()) { - socket.connect(new InetSocketAddress(preferredHost, _shell.getPort()), 5000); - } catch (final IOException e) { - isHostUp = false; - logger.trace("Host: {} is not reachable", preferredHost); - - } - if (isHostUp && _link != null && _inProgress.get() == 0) { + final String connectedHost = shell.getConnectedHost(); + logger.debug("Running preferred host checker task, connected host={}, preferred host={}", + connectedHost, preferredHost); + if (preferredHost == null || preferredHost.equals(connectedHost) || link == null) { + return; + } + boolean isHostUp = false; + try (final Socket socket = new Socket()) { + socket.connect(new InetSocketAddress(preferredHost, shell.getPort()), 5000); + isHostUp = true; + } catch (final IOException e) { + logger.debug("Host: {} is not reachable", preferredHost); + } + if (isHostUp && link != null && commandsInProgress.get() == 
0) { + if (logger.isDebugEnabled()) { logger.debug("Preferred host {} is found to be reachable, trying to reconnect", preferredHost); - - _shell.resetHostCounter(); - reconnect(_link); } + shell.resetHostCounter(); + reconnect(link); } } catch (Throwable t) { logger.error("Error caught while attempting to connect to preferred host", t); } } - } - } diff --git a/agent/src/main/java/com/cloud/agent/AgentShell.java b/agent/src/main/java/com/cloud/agent/AgentShell.java index 0699e00250b..aea7fd3a8de 100644 --- a/agent/src/main/java/com/cloud/agent/AgentShell.java +++ b/agent/src/main/java/com/cloud/agent/AgentShell.java @@ -16,29 +16,6 @@ // under the License. package com.cloud.agent; -import com.cloud.agent.Agent.ExitStatus; -import com.cloud.agent.dao.StorageComponent; -import com.cloud.agent.dao.impl.PropertiesStorage; -import com.cloud.agent.properties.AgentProperties; -import com.cloud.agent.properties.AgentPropertiesFileHandler; -import com.cloud.resource.ServerResource; -import com.cloud.utils.LogUtils; -import com.cloud.utils.ProcessUtil; -import com.cloud.utils.PropertiesUtil; -import com.cloud.utils.backoff.BackoffAlgorithm; -import com.cloud.utils.backoff.impl.ConstantTimeBackoff; -import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.commons.daemon.Daemon; -import org.apache.commons.daemon.DaemonContext; -import org.apache.commons.daemon.DaemonInitException; -import org.apache.commons.lang.math.NumberUtils; -import org.apache.commons.lang3.BooleanUtils; -import org.apache.commons.lang3.StringUtils; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.core.config.Configurator; - -import javax.naming.ConfigurationException; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; @@ -53,6 +30,31 @@ import java.util.Map; import java.util.Properties; import java.util.UUID; +import javax.naming.ConfigurationException; + +import 
org.apache.commons.daemon.Daemon; +import org.apache.commons.daemon.DaemonContext; +import org.apache.commons.daemon.DaemonInitException; +import org.apache.commons.lang.math.NumberUtils; +import org.apache.commons.lang3.BooleanUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.config.Configurator; + +import com.cloud.agent.Agent.ExitStatus; +import com.cloud.agent.dao.StorageComponent; +import com.cloud.agent.dao.impl.PropertiesStorage; +import com.cloud.agent.properties.AgentProperties; +import com.cloud.agent.properties.AgentPropertiesFileHandler; +import com.cloud.resource.ServerResource; +import com.cloud.utils.LogUtils; +import com.cloud.utils.ProcessUtil; +import com.cloud.utils.PropertiesUtil; +import com.cloud.utils.backoff.BackoffAlgorithm; +import com.cloud.utils.backoff.impl.ConstantTimeBackoff; +import com.cloud.utils.exception.CloudRuntimeException; + public class AgentShell implements IAgentShell, Daemon { protected static Logger LOGGER = LogManager.getLogger(AgentShell.class); @@ -77,6 +79,7 @@ public class AgentShell implements IAgentShell, Daemon { private String hostToConnect; private String connectedHost; private Long preferredHostCheckInterval; + private boolean connectionTransfer = false; protected AgentProperties agentProperties = new AgentProperties(); public AgentShell() { @@ -215,6 +218,14 @@ public class AgentShell implements IAgentShell, Daemon { _storage.persist(name, value); } + public boolean isConnectionTransfer() { + return connectionTransfer; + } + + public void setConnectionTransfer(boolean connectionTransfer) { + this.connectionTransfer = connectionTransfer; + } + void loadProperties() throws ConfigurationException { final File file = PropertiesUtil.findConfigFile("agent.properties"); @@ -406,7 +417,9 @@ public class AgentShell implements IAgentShell, Daemon { LOGGER.info("Defaulting to the constant time 
backoff algorithm"); _backoff = new ConstantTimeBackoff(); - _backoff.configure("ConstantTimeBackoff", new HashMap()); + Map map = new HashMap<>(); + map.put("seconds", _properties.getProperty("backoff.seconds")); + _backoff.configure("ConstantTimeBackoff", map); } private void launchAgent() throws ConfigurationException { @@ -455,6 +468,11 @@ public class AgentShell implements IAgentShell, Daemon { agent.start(); } + @Override + public Integer getSslHandshakeTimeout() { + return AgentPropertiesFileHandler.getPropertyValue(AgentProperties.SSL_HANDSHAKE_TIMEOUT); + } + public synchronized int getNextAgentId() { return _nextAgentId++; } diff --git a/agent/src/main/java/com/cloud/agent/IAgentShell.java b/agent/src/main/java/com/cloud/agent/IAgentShell.java index 2dd08fffd45..c0ecd90ae69 100644 --- a/agent/src/main/java/com/cloud/agent/IAgentShell.java +++ b/agent/src/main/java/com/cloud/agent/IAgentShell.java @@ -70,4 +70,10 @@ public interface IAgentShell { String getConnectedHost(); void launchNewAgent(ServerResource resource) throws ConfigurationException; + + boolean isConnectionTransfer(); + + void setConnectionTransfer(boolean connectionTransfer); + + Integer getSslHandshakeTimeout(); } diff --git a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java index 8f97edc3935..61cd27fff77 100644 --- a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java +++ b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java @@ -383,7 +383,7 @@ public class AgentProperties{ /** * This param will set the CPU architecture for the domain to override what the management server would send.
* In case of arm64 (aarch64), this will change the machine type to 'virt' and add a SCSI and a USB controller in the domain XML.
- * Possible values: x86_64 | aarch64
+ * Possible values: x86_64 | aarch64 | s390x
* Data type: String.
* Default value: null (will set use the architecture of the VM's OS). */ @@ -516,6 +516,7 @@ public class AgentProperties{ /** * The model of Watchdog timer to present to the Guest.
* For all models refer to the libvirt documentation.
+ * PLEASE NOTE: to disable the watchdogs definitions, use value: none * Data type: String.
* Default value: i6300esb */ @@ -810,6 +811,13 @@ public class AgentProperties{ */ public static final Property HOST_TAGS = new Property<>("host.tags", null, String.class); + /** + * Timeout for SSL handshake in seconds + * Data type: Integer.
+ * Default value: null + */ + public static final Property SSL_HANDSHAKE_TIMEOUT = new Property<>("ssl.handshake.timeout", null, Integer.class); + public static class Property { private String name; private T defaultValue; diff --git a/agent/src/test/java/com/cloud/agent/AgentShellTest.java b/agent/src/test/java/com/cloud/agent/AgentShellTest.java index 4126692546f..6d9758cc3dc 100644 --- a/agent/src/test/java/com/cloud/agent/AgentShellTest.java +++ b/agent/src/test/java/com/cloud/agent/AgentShellTest.java @@ -362,4 +362,11 @@ public class AgentShellTest { Assert.assertEquals(expected, shell.getConnectedHost()); } + + @Test + public void testGetSslHandshakeTimeout() { + Integer expected = 1; + agentPropertiesFileHandlerMocked.when(() -> AgentPropertiesFileHandler.getPropertyValue(Mockito.eq(AgentProperties.SSL_HANDSHAKE_TIMEOUT))).thenReturn(expected); + Assert.assertEquals(expected, agentShellSpy.getSslHandshakeTimeout()); + } } diff --git a/agent/src/test/java/com/cloud/agent/AgentTest.java b/agent/src/test/java/com/cloud/agent/AgentTest.java new file mode 100644 index 00000000000..65dc030ebd7 --- /dev/null +++ b/agent/src/test/java/com/cloud/agent/AgentTest.java @@ -0,0 +1,257 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.agent; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.net.InetSocketAddress; + +import javax.naming.ConfigurationException; + +import org.apache.logging.log4j.Logger; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.resource.ServerResource; +import com.cloud.utils.backoff.impl.ConstantTimeBackoff; +import com.cloud.utils.nio.Link; +import com.cloud.utils.nio.NioConnection; + +@RunWith(MockitoJUnitRunner.class) +public class AgentTest { + Agent agent; + private AgentShell shell; + private ServerResource serverResource; + private Logger logger; + + @Before + public void setUp() throws ConfigurationException { + shell = mock(AgentShell.class); + serverResource = mock(ServerResource.class); + doReturn(true).when(serverResource).configure(any(), any()); + doReturn(1).when(shell).getWorkers(); + doReturn(1).when(shell).getPingRetries(); + agent = new Agent(shell, 1, serverResource); + logger = mock(Logger.class); + ReflectionTestUtils.setField(agent, "logger", logger); + } + + @Test + public void testGetLinkLogNullLinkReturnsEmptyString() { + Link link = null; + String result = agent.getLinkLog(link); + assertEquals("", result); + } + + 
@Test + public void testGetLinkLogLinkWithTraceEnabledReturnsLinkLogWithHashCode() { + Link link = mock(Link.class); + InetSocketAddress socketAddress = new InetSocketAddress("192.168.1.100", 1111); + when(link.getSocketAddress()).thenReturn(socketAddress); + when(logger.isTraceEnabled()).thenReturn(true); + + String result = agent.getLinkLog(link); + System.out.println(result); + assertTrue(result.startsWith(System.identityHashCode(link) + "-")); + assertTrue(result.contains("192.168.1.100")); + } + + @Test + public void testGetAgentNameWhenServerResourceIsNull() { + ReflectionTestUtils.setField(agent, "serverResource", null); + assertEquals("Agent", agent.getAgentName()); + } + + @Test + public void testGetAgentNameWhenAppendAgentNameIsTrue() { + when(serverResource.isAppendAgentNameToLogs()).thenReturn(true); + when(serverResource.getName()).thenReturn("TestAgent"); + + String agentName = agent.getAgentName(); + assertEquals("TestAgent", agentName); + } + + @Test + public void testGetAgentNameWhenAppendAgentNameIsFalse() { + when(serverResource.isAppendAgentNameToLogs()).thenReturn(false); + + String agentName = agent.getAgentName(); + assertEquals("Agent", agentName); + } + + @Test + public void testAgentInitialization() { + Runtime.getRuntime().removeShutdownHook(agent.shutdownThread); + when(shell.getPingRetries()).thenReturn(3); + when(shell.getWorkers()).thenReturn(5); + agent.setupShutdownHookAndInitExecutors(); + assertNotNull(agent.selfTaskExecutor); + assertNotNull(agent.outRequestHandler); + assertNotNull(agent.requestHandler); + } + + @Test + public void testAgentShutdownHookAdded() { + Runtime.getRuntime().removeShutdownHook(agent.shutdownThread); + agent.setupShutdownHookAndInitExecutors(); + verify(logger).trace("Adding shutdown hook"); + } + + @Test + public void testGetResourceGuidValidGuidAndResourceName() { + when(shell.getGuid()).thenReturn("12345"); + String result = agent.getResourceGuid(); + assertTrue(result.startsWith("12345-" + 
ServerResource.class.getSimpleName())); + } + + @Test + public void testGetZoneReturnsValidZone() { + when(shell.getZone()).thenReturn("ZoneA"); + String result = agent.getZone(); + assertEquals("ZoneA", result); + } + + @Test + public void testGetPodReturnsValidPod() { + when(shell.getPod()).thenReturn("PodA"); + String result = agent.getPod(); + assertEquals("PodA", result); + } + + @Test + public void testSetLinkAssignsLink() { + Link mockLink = mock(Link.class); + agent.setLink(mockLink); + assertEquals(mockLink, agent.link); + } + + @Test + public void testGetResourceReturnsServerResource() { + ServerResource mockResource = mock(ServerResource.class); + ReflectionTestUtils.setField(agent, "serverResource", mockResource); + ServerResource result = agent.getResource(); + assertSame(mockResource, result); + } + + @Test + public void testGetResourceName() { + String result = agent.getResourceName(); + assertTrue(result.startsWith(ServerResource.class.getSimpleName())); + } + + @Test + public void testUpdateLastPingResponseTimeUpdatesCurrentTime() { + long beforeUpdate = System.currentTimeMillis(); + agent.updateLastPingResponseTime(); + long updatedTime = agent.lastPingResponseTime.get(); + assertTrue(updatedTime >= beforeUpdate); + assertTrue(updatedTime <= System.currentTimeMillis()); + } + + @Test + public void testGetNextSequenceIncrementsSequence() { + long initialSequence = agent.getNextSequence(); + long nextSequence = agent.getNextSequence(); + assertEquals(initialSequence + 1, nextSequence); + long thirdSequence = agent.getNextSequence(); + assertEquals(nextSequence + 1, thirdSequence); + } + + @Test + public void testRegisterControlListenerAddsListener() { + IAgentControlListener listener = mock(IAgentControlListener.class); + agent.registerControlListener(listener); + assertTrue(agent.controlListeners.contains(listener)); + } + + @Test + public void testUnregisterControlListenerRemovesListener() { + IAgentControlListener listener = 
mock(IAgentControlListener.class); + agent.registerControlListener(listener); + assertTrue(agent.controlListeners.contains(listener)); + agent.unregisterControlListener(listener); + assertFalse(agent.controlListeners.contains(listener)); + } + + @Test + public void testCloseAndTerminateLinkLinkIsNullDoesNothing() { + agent.closeAndTerminateLink(null); + } + + @Test + public void testCloseAndTerminateLinkValidLinkCallsCloseAndTerminate() { + Link mockLink = mock(Link.class); + agent.closeAndTerminateLink(mockLink); + verify(mockLink).close(); + verify(mockLink).terminated(); + } + + @Test + public void testStopAndCleanupConnectionConnectionIsNullDoesNothing() { + agent.connection = null; + agent.stopAndCleanupConnection(false); + } + + @Test + public void testStopAndCleanupConnectionValidConnectionNoWaitStopsAndCleansUp() throws IOException { + NioConnection mockConnection = mock(NioConnection.class); + agent.connection = mockConnection; + agent.stopAndCleanupConnection(false); + verify(mockConnection).stop(); + verify(mockConnection).cleanUp(); + } + + @Test + public void testStopAndCleanupConnectionCleanupThrowsIOExceptionLogsWarning() throws IOException { + NioConnection mockConnection = mock(NioConnection.class); + agent.connection = mockConnection; + doThrow(new IOException("Cleanup failed")).when(mockConnection).cleanUp(); + agent.stopAndCleanupConnection(false); + verify(mockConnection).stop(); + verify(logger).warn(eq("Fail to clean up old connection. 
{}"), any(IOException.class)); + } + + @Test + public void testStopAndCleanupConnectionValidConnectionWaitForStopWaitsForStartupToStop() throws IOException { + NioConnection mockConnection = mock(NioConnection.class); + ConstantTimeBackoff mockBackoff = mock(ConstantTimeBackoff.class); + mockBackoff.setTimeToWait(0); + agent.connection = mockConnection; + when(shell.getBackoffAlgorithm()).thenReturn(mockBackoff); + when(mockConnection.isStartup()).thenReturn(true, true, false); + agent.stopAndCleanupConnection(true); + verify(mockConnection).stop(); + verify(mockConnection).cleanUp(); + verify(mockBackoff, times(3)).waitBeforeRetry(); + } +} diff --git a/api/src/main/java/com/cloud/host/Host.java b/api/src/main/java/com/cloud/host/Host.java index 56b4ed75a31..afac6df5631 100644 --- a/api/src/main/java/com/cloud/host/Host.java +++ b/api/src/main/java/com/cloud/host/Host.java @@ -177,6 +177,8 @@ public interface Host extends StateObject, Identity, Partition, HAResour */ Long getManagementServerId(); + Long getLastManagementServerId(); + /* *@return removal date */ diff --git a/api/src/main/java/com/cloud/host/Status.java b/api/src/main/java/com/cloud/host/Status.java index 5dc82bbfaef..af6af82e973 100644 --- a/api/src/main/java/com/cloud/host/Status.java +++ b/api/src/main/java/com/cloud/host/Status.java @@ -127,6 +127,7 @@ public enum Status { s_fsm.addTransition(Status.Connecting, Event.HostDown, Status.Down); s_fsm.addTransition(Status.Connecting, Event.Ping, Status.Connecting); s_fsm.addTransition(Status.Connecting, Event.ManagementServerDown, Status.Disconnected); + s_fsm.addTransition(Status.Connecting, Event.StartAgentRebalance, Status.Rebalancing); s_fsm.addTransition(Status.Connecting, Event.AgentDisconnected, Status.Alert); s_fsm.addTransition(Status.Up, Event.PingTimeout, Status.Alert); s_fsm.addTransition(Status.Up, Event.AgentDisconnected, Status.Alert); diff --git a/api/src/main/java/com/cloud/resource/ResourceService.java 
b/api/src/main/java/com/cloud/resource/ResourceService.java index 2757c918ed6..562c3c418df 100644 --- a/api/src/main/java/com/cloud/resource/ResourceService.java +++ b/api/src/main/java/com/cloud/resource/ResourceService.java @@ -23,11 +23,11 @@ import org.apache.cloudstack.api.command.admin.cluster.DeleteClusterCmd; import org.apache.cloudstack.api.command.admin.cluster.UpdateClusterCmd; import org.apache.cloudstack.api.command.admin.host.AddHostCmd; import org.apache.cloudstack.api.command.admin.host.AddSecondaryStorageCmd; -import org.apache.cloudstack.api.command.admin.host.CancelMaintenanceCmd; +import org.apache.cloudstack.api.command.admin.host.CancelHostMaintenanceCmd; import org.apache.cloudstack.api.command.admin.host.ReconnectHostCmd; import org.apache.cloudstack.api.command.admin.host.UpdateHostCmd; import org.apache.cloudstack.api.command.admin.host.UpdateHostPasswordCmd; -import org.apache.cloudstack.api.command.admin.host.PrepareForMaintenanceCmd; +import org.apache.cloudstack.api.command.admin.host.PrepareForHostMaintenanceCmd; import org.apache.cloudstack.api.command.admin.host.DeclareHostAsDegradedCmd; import org.apache.cloudstack.api.command.admin.host.CancelHostAsDegradedCmd; @@ -51,7 +51,7 @@ public interface ResourceService { Host autoUpdateHostAllocationState(Long hostId, ResourceState.Event resourceEvent) throws NoTransitionException; - Host cancelMaintenance(CancelMaintenanceCmd cmd); + Host cancelMaintenance(CancelHostMaintenanceCmd cmd); Host reconnectHost(ReconnectHostCmd cmd) throws AgentUnavailableException; @@ -69,7 +69,7 @@ public interface ResourceService { List discoverHosts(AddSecondaryStorageCmd cmd) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException; - Host maintain(PrepareForMaintenanceCmd cmd); + Host maintain(PrepareForHostMaintenanceCmd cmd); Host declareHostAsDegraded(DeclareHostAsDegradedCmd cmd) throws NoTransitionException; diff --git 
a/api/src/main/java/com/cloud/server/ManagementServerHostStats.java b/api/src/main/java/com/cloud/server/ManagementServerHostStats.java index 1eea7addba3..6eb275031e8 100644 --- a/api/src/main/java/com/cloud/server/ManagementServerHostStats.java +++ b/api/src/main/java/com/cloud/server/ManagementServerHostStats.java @@ -19,6 +19,7 @@ package com.cloud.server; import java.util.Date; +import java.util.List; /** * management server related stats @@ -70,6 +71,10 @@ public interface ManagementServerHostStats { String getOsDistribution(); + List getLastAgents(); + + List getAgents(); + int getAgentCount(); long getHeapMemoryUsed(); diff --git a/api/src/main/java/org/apache/cloudstack/acl/RoleService.java b/api/src/main/java/org/apache/cloudstack/acl/RoleService.java index 68204d43253..f041c8342ae 100644 --- a/api/src/main/java/org/apache/cloudstack/acl/RoleService.java +++ b/api/src/main/java/org/apache/cloudstack/acl/RoleService.java @@ -30,6 +30,11 @@ public interface RoleService { ConfigKey EnableDynamicApiChecker = new ConfigKey<>("Advanced", Boolean.class, "dynamic.apichecker.enabled", "false", "If set to true, this enables the dynamic role-based api access checker and disables the default static role-based api access checker.", true); + ConfigKey DynamicApiCheckerCachePeriod = new ConfigKey<>("Advanced", Integer.class, + "dynamic.apichecker.cache.period", "0", + "Defines the expiration time in seconds for the Dynamic API Checker cache, determining how long cached data is retained before being refreshed. 
If set to zero then caching will be disabled", + false); + boolean isEnabled(); /** diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 20d1d752474..4087d76033c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -1157,9 +1157,12 @@ public class ApiConstants { public static final String LOGOUT = "logout"; public static final String LIST_IDPS = "listIdps"; - public static final String READY_FOR_SHUTDOWN = "readyforshutdown"; + public static final String MAINTENANCE_INITIATED = "maintenanceinitiated"; public static final String SHUTDOWN_TRIGGERED = "shutdowntriggered"; + public static final String READY_FOR_SHUTDOWN = "readyforshutdown"; public static final String PENDING_JOBS_COUNT = "pendingjobscount"; + public static final String AGENTS_COUNT = "agentscount"; + public static final String AGENTS = "agents"; public static final String PUBLIC_MTU = "publicmtu"; public static final String PRIVATE_MTU = "privatemtu"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java index b91e56dcaef..895e9328992 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java @@ -100,7 +100,7 @@ public class ListDomainsCmd extends BaseListCmd implements UserCmd { dv = EnumSet.of(DomainDetails.all); } else { try { - ArrayList dc = new ArrayList(); + ArrayList dc = new ArrayList<>(); for (String detail : viewDetails) { dc.add(DomainDetails.valueOf(detail)); } @@ -142,7 +142,10 @@ public class ListDomainsCmd extends BaseListCmd implements UserCmd { if (CollectionUtils.isEmpty(response)) { return; } - 
_resourceLimitService.updateTaggedResourceLimitsAndCountsForDomains(response, getTag()); + EnumSet details = getDetails(); + if (details.contains(DomainDetails.all) || details.contains(DomainDetails.resource)) { + _resourceLimitService.updateTaggedResourceLimitsAndCountsForDomains(response, getTag()); + } if (!getShowIcon()) { return; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/CancelMaintenanceCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/CancelHostMaintenanceCmd.java similarity index 98% rename from api/src/main/java/org/apache/cloudstack/api/command/admin/host/CancelMaintenanceCmd.java rename to api/src/main/java/org/apache/cloudstack/api/command/admin/host/CancelHostMaintenanceCmd.java index a514a61b8a4..55fe8ec23ce 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/CancelMaintenanceCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/CancelHostMaintenanceCmd.java @@ -33,7 +33,7 @@ import com.cloud.user.Account; @APICommand(name = "cancelHostMaintenance", description = "Cancels host maintenance.", responseObject = HostResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) -public class CancelMaintenanceCmd extends BaseAsyncCmd { +public class CancelHostMaintenanceCmd extends BaseAsyncCmd { ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java index af87bbf33bb..5e229521efe 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java @@ -31,6 +31,7 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.ClusterResponse; import org.apache.cloudstack.api.response.HostResponse; import 
org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.ManagementServerResponse; import org.apache.cloudstack.api.response.PodResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.ZoneResponse; @@ -105,6 +106,9 @@ public class ListHostsCmd extends BaseListCmd { @Parameter(name = ApiConstants.HYPERVISOR, type = CommandType.STRING, description = "hypervisor type of host: XenServer,KVM,VMware,Hyperv,BareMetal,Simulator") private String hypervisor; + @Parameter(name = ApiConstants.MANAGEMENT_SERVER_ID, type = CommandType.UUID, entityType = ManagementServerResponse.class, description = "the id of the management server", since="4.21.0") + private Long managementServerId; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -189,6 +193,10 @@ public class ListHostsCmd extends BaseListCmd { return outOfBandManagementPowerState; } + public Long getManagementServerId() { + return managementServerId; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/PrepareForMaintenanceCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/PrepareForHostMaintenanceCmd.java similarity index 98% rename from api/src/main/java/org/apache/cloudstack/api/command/admin/host/PrepareForMaintenanceCmd.java rename to api/src/main/java/org/apache/cloudstack/api/command/admin/host/PrepareForHostMaintenanceCmd.java index 2641c54364e..5c2b50c8723 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/PrepareForMaintenanceCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/PrepareForHostMaintenanceCmd.java @@ -33,7 +33,7 @@ import 
com.cloud.utils.exception.CloudRuntimeException; @APICommand(name = "prepareHostForMaintenance", description = "Prepares a host for maintenance.", responseObject = HostResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) -public class PrepareForMaintenanceCmd extends BaseAsyncCmd { +public class PrepareForHostMaintenanceCmd extends BaseAsyncCmd { ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmd.java index 9157188fdee..bd9ab30f4f1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmd.java @@ -157,7 +157,10 @@ public class ListAccountsCmd extends BaseListDomainResourcesCmd implements UserC if (CollectionUtils.isEmpty(response)) { return; } - _resourceLimitService.updateTaggedResourceLimitsAndCountsForAccounts(response, getTag()); + EnumSet details = getDetails(); + if (details.contains(DomainDetails.all) || details.contains(DomainDetails.resource)) { + _resourceLimitService.updateTaggedResourceLimitsAndCountsForAccounts(response, getTag()); + } if (!getShowIcon()) { return; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java index 56c818f832b..efccb5c09b0 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.api.command.user.firewall; import java.util.ArrayList; import java.util.List; +import org.apache.commons.collections.CollectionUtils; import org.apache.cloudstack.acl.RoleType; 
import org.apache.cloudstack.api.APICommand; @@ -40,6 +41,7 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.IpAddress; import com.cloud.network.rules.FirewallRule; import com.cloud.user.Account; +import com.cloud.utils.StringUtils; import com.cloud.utils.net.NetUtils; @APICommand(name = "createFirewallRule", description = "Creates a firewall rule for a given IP address", responseObject = FirewallResponse.class, entityType = {FirewallRule.class}, @@ -125,14 +127,13 @@ public class CreateFirewallRuleCmd extends BaseAsyncCreateCmd implements Firewal @Override public List getSourceCidrList() { - if (cidrlist != null) { + if (CollectionUtils.isNotEmpty(cidrlist) && !(cidrlist.size() == 1 && StringUtils.isBlank(cidrlist.get(0)))) { return cidrlist; } else { - List oneCidrList = new ArrayList(); + List oneCidrList = new ArrayList<>(); oneCidrList.add(NetUtils.ALL_IP4_CIDRS); return oneCidrList; } - } // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/response/AsyncJobResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/AsyncJobResponse.java index 3eeaaef2afa..5b47a7a06e4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/AsyncJobResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/AsyncJobResponse.java @@ -83,9 +83,13 @@ public class AsyncJobResponse extends BaseResponse { @Param(description = "the unique ID of the instance/entity object related to the job") private String jobInstanceId; - @SerializedName("managementserverid") + @SerializedName(ApiConstants.MANAGEMENT_SERVER_ID) @Param(description = "the msid of the management server on which the job is running", since = "4.19") - private Long msid; + private String managementServerId; + + @SerializedName(ApiConstants.MANAGEMENT_SERVER_NAME) + @Param(description = "the management server name of the host", since = "4.21.0") + private String managementServerName; 
@SerializedName(ApiConstants.CREATED) @Param(description = " the created date of the job") @@ -156,7 +160,11 @@ public class AsyncJobResponse extends BaseResponse { this.removed = removed; } - public void setMsid(Long msid) { - this.msid = msid; + public void setManagementServerId(String managementServerId) { + this.managementServerId = managementServerId; + } + + public void setManagementServerName(String managementServerName) { + this.managementServerName = managementServerName; } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/HostForMigrationResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/HostForMigrationResponse.java index 24015e0b459..b4de48baec4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/HostForMigrationResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/HostForMigrationResponse.java @@ -16,465 +16,20 @@ // under the License. package org.apache.cloudstack.api.response; -import java.util.Date; - -import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.api.BaseResponse; import org.apache.cloudstack.api.EntityReference; import com.cloud.host.Host; -import com.cloud.host.Status; import com.cloud.serializer.Param; import com.google.gson.annotations.SerializedName; @EntityReference(value = Host.class) -public class HostForMigrationResponse extends BaseResponse { - @SerializedName(ApiConstants.ID) - @Param(description = "the ID of the host") - private String id; - - @SerializedName(ApiConstants.NAME) - @Param(description = "the name of the host") - private String name; - - @SerializedName(ApiConstants.STATE) - @Param(description = "the state of the host") - private Status state; - - @SerializedName("disconnected") - @Param(description = "true if the host is disconnected. 
False otherwise.") - private Date disconnectedOn; - - @SerializedName(ApiConstants.TYPE) - @Param(description = "the host type") - private Host.Type hostType; - - @SerializedName("oscategoryid") - @Param(description = "the OS category ID of the host") - private String osCategoryId; - - @SerializedName("oscategoryname") - @Param(description = "the OS category name of the host") - private String osCategoryName; - - @SerializedName(ApiConstants.IP_ADDRESS) - @Param(description = "the IP address of the host") - private String ipAddress; - - @SerializedName(ApiConstants.ZONE_ID) - @Param(description = "the Zone ID of the host") - private String zoneId; - - @SerializedName(ApiConstants.ZONE_NAME) - @Param(description = "the Zone name of the host") - private String zoneName; - - @SerializedName(ApiConstants.POD_ID) - @Param(description = "the Pod ID of the host") - private String podId; - - @SerializedName("podname") - @Param(description = "the Pod name of the host") - private String podName; - - @SerializedName("version") - @Param(description = "the host version") - private String version; - - @SerializedName(ApiConstants.HYPERVISOR) - @Param(description = "the host hypervisor") - private String hypervisor; - - @SerializedName("cpunumber") - @Param(description = "the CPU number of the host") - private Integer cpuNumber; - - @SerializedName("cpuspeed") - @Param(description = "the CPU speed of the host") - private Long cpuSpeed; - - @Deprecated - @SerializedName("cpuallocated") - @Param(description = "the amount of the host's CPU currently allocated") - private String cpuAllocated; - - @SerializedName("cpuallocatedvalue") - @Param(description = "the amount of the host's CPU currently allocated in MHz") - private Long cpuAllocatedValue; - - @SerializedName("cpuallocatedpercentage") - @Param(description = "the amount of the host's CPU currently allocated in percentage") - private String cpuAllocatedPercentage; - - @SerializedName("cpuallocatedwithoverprovisioning") - 
@Param(description = "the amount of the host's CPU currently allocated after applying the cpu.overprovisioning.factor") - private String cpuAllocatedWithOverprovisioning; - - @SerializedName("cpuused") - @Param(description = "the amount of the host's CPU currently used") - private String cpuUsed; - - @SerializedName("cpuwithoverprovisioning") - @Param(description = "the amount of the host's CPU after applying the cpu.overprovisioning.factor ") - private String cpuWithOverprovisioning; - - @Deprecated - @SerializedName("memorytotal") - @Param(description = "the memory total of the host, this parameter is deprecated use memorywithoverprovisioning") - private Long memoryTotal; - - @SerializedName("memorywithoverprovisioning") - @Param(description = "the amount of the host's memory after applying the mem.overprovisioning.factor ") - private String memWithOverprovisioning; - - @SerializedName("averageload") - @Param(description = "the cpu average load on the host") - private Long averageLoad; - - @SerializedName("networkkbsread") - @Param(description = "the incoming network traffic on the host") - private Long networkKbsRead; - - @SerializedName("networkkbswrite") - @Param(description = "the outgoing network traffic on the host") - private Long networkKbsWrite; - - @Deprecated - @SerializedName("memoryallocated") - @Param(description = "the amount of the host's memory currently allocated") - private String memoryAllocated; - - @SerializedName("memoryallocatedpercentage") - @Param(description = "the amount of the host's memory currently allocated in percentage") - private String memoryAllocatedPercentage; - - @SerializedName("memoryallocatedbytes") - @Param(description = "the amount of the host's memory currently allocated in bytes") - private Long memoryAllocatedBytes; - - @SerializedName("memoryused") - @Param(description = "the amount of the host's memory currently used") - private Long memoryUsed; - - @SerializedName("disksizetotal") - @Param(description = "the total 
disk size of the host") - private Long diskSizeTotal; - - @SerializedName("disksizeallocated") - @Param(description = "the host's currently allocated disk size") - private Long diskSizeAllocated; - - @SerializedName("capabilities") - @Param(description = "capabilities of the host") - private String capabilities; - - @SerializedName("lastpinged") - @Param(description = "the date and time the host was last pinged") - private Date lastPinged; - - @SerializedName("managementserverid") - @Param(description = "the management server ID of the host") - private Long managementServerId; - - @SerializedName("clusterid") - @Param(description = "the cluster ID of the host") - private String clusterId; - - @SerializedName("clustername") - @Param(description = "the cluster name of the host") - private String clusterName; - - @SerializedName("clustertype") - @Param(description = "the cluster type of the cluster that host belongs to") - private String clusterType; - - @SerializedName("islocalstorageactive") - @Param(description = "true if local storage is active, false otherwise") - private Boolean localStorageActive; - - @SerializedName(ApiConstants.CREATED) - @Param(description = "the date and time the host was created") - private Date created; - - @SerializedName("removed") - @Param(description = "the date and time the host was removed") - private Date removed; - - @SerializedName("events") - @Param(description = "events available for the host") - private String events; - - @SerializedName("hosttags") - @Param(description = "comma-separated list of tags for the host") - private String hostTags; - - @SerializedName("explicithosttags") - @Param(description = "comma-separated list of explicit host tags for the host", since = "4.20.0") - private String explicitHostTags; - - @SerializedName("implicithosttags") - @Param(description = "comma-separated list of implicit host tags for the host", since = "4.20.0") - private String implicitHostTags; - - @SerializedName("hasenoughcapacity") 
- @Param(description = "true if this host has enough CPU and RAM capacity to migrate a VM to it, false otherwise") - private Boolean hasEnoughCapacity; - - @SerializedName("suitableformigration") - @Param(description = "true if this host is suitable(has enough capacity and satisfies all conditions like hosttags, " + - "max guests vm limit etc) to migrate a VM to it , false otherwise") - private Boolean suitableForMigration; +public class HostForMigrationResponse extends HostResponse { @SerializedName("requiresStorageMotion") @Param(description = "true if migrating a vm to this host requires storage motion, false otherwise") private Boolean requiresStorageMotion; - @SerializedName("resourcestate") - @Param(description = "the resource state of the host") - private String resourceState; - - @SerializedName(ApiConstants.HYPERVISOR_VERSION) - @Param(description = "the hypervisor version") - private String hypervisorVersion; - - @SerializedName(ApiConstants.HA_HOST) - @Param(description = "true if the host is Ha host (dedicated to vms started by HA process; false otherwise") - private Boolean haHost; - - @Override - public String getObjectId() { - return getId(); - } - - public String getId() { - return id; - } - - public void setId(String id) { - this.id = id; - } - - public void setName(String name) { - this.name = name; - } - - public void setState(Status state) { - this.state = state; - } - - public void setDisconnectedOn(Date disconnectedOn) { - this.disconnectedOn = disconnectedOn; - } - - public void setHostType(Host.Type hostType) { - this.hostType = hostType; - } - - public void setOsCategoryId(String osCategoryId) { - this.osCategoryId = osCategoryId; - } - - public void setOsCategoryName(String osCategoryName) { - this.osCategoryName = osCategoryName; - } - - public void setIpAddress(String ipAddress) { - this.ipAddress = ipAddress; - } - - public void setZoneId(String zoneId) { - this.zoneId = zoneId; - } - - public void setZoneName(String zoneName) { - 
this.zoneName = zoneName; - } - - public void setPodId(String podId) { - this.podId = podId; - } - - public void setPodName(String podName) { - this.podName = podName; - } - - public void setVersion(String version) { - this.version = version; - } - - public void setHypervisor(String hypervisor) { - this.hypervisor = hypervisor; - } - - public void setCpuNumber(Integer cpuNumber) { - this.cpuNumber = cpuNumber; - } - - public void setCpuSpeed(Long cpuSpeed) { - this.cpuSpeed = cpuSpeed; - } - - public String getCpuAllocated() { - return cpuAllocated; - } - - public void setCpuAllocated(String cpuAllocated) { - this.cpuAllocated = cpuAllocated; - } - - public void setCpuAllocatedValue(Long cpuAllocatedValue) { - this.cpuAllocatedValue = cpuAllocatedValue; - } - - public void setCpuAllocatedPercentage(String cpuAllocatedPercentage) { - this.cpuAllocatedPercentage = cpuAllocatedPercentage; - } - - public void setCpuAllocatedWithOverprovisioning(String cpuAllocatedWithOverprovisioning) { - this.cpuAllocatedWithOverprovisioning = cpuAllocatedWithOverprovisioning; - } - - public void setCpuUsed(String cpuUsed) { - this.cpuUsed = cpuUsed; - } - - public void setAverageLoad(Long averageLoad) { - this.averageLoad = averageLoad; - } - - public void setNetworkKbsRead(Long networkKbsRead) { - this.networkKbsRead = networkKbsRead; - } - - public void setNetworkKbsWrite(Long networkKbsWrite) { - this.networkKbsWrite = networkKbsWrite; - } - - public void setMemoryAllocated(String memoryAllocated) { - this.memoryAllocated = memoryAllocated; - } - - public void setMemoryAllocatedPercentage(String memoryAllocatedPercentage) { - this.memoryAllocatedPercentage = memoryAllocatedPercentage; - } - - public void setMemoryAllocatedBytes(Long memoryAllocatedBytes) { - this.memoryAllocatedBytes = memoryAllocatedBytes; - } - - public void setMemoryUsed(Long memoryUsed) { - this.memoryUsed = memoryUsed; - } - - public void setDiskSizeTotal(Long diskSizeTotal) { - this.diskSizeTotal = 
diskSizeTotal; - } - - public void setDiskSizeAllocated(Long diskSizeAllocated) { - this.diskSizeAllocated = diskSizeAllocated; - } - - public void setCapabilities(String capabilities) { - this.capabilities = capabilities; - } - - public void setLastPinged(Date lastPinged) { - this.lastPinged = lastPinged; - } - - public void setManagementServerId(Long managementServerId) { - this.managementServerId = managementServerId; - } - - public void setClusterId(String clusterId) { - this.clusterId = clusterId; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public void setClusterType(String clusterType) { - this.clusterType = clusterType; - } - - public void setLocalStorageActive(Boolean localStorageActive) { - this.localStorageActive = localStorageActive; - } - - public void setCreated(Date created) { - this.created = created; - } - - public void setRemoved(Date removed) { - this.removed = removed; - } - - public void setEvents(String events) { - this.events = events; - } - - public String getHostTags() { - return hostTags; - } - - public void setHostTags(String hostTags) { - this.hostTags = hostTags; - } - - public void setExplicitHostTags(String explicitHostTags) { - this.explicitHostTags = explicitHostTags; - } - - public void setImplicitHostTags(String implicitHostTags) { - this.implicitHostTags = implicitHostTags; - } - - public void setHasEnoughCapacity(Boolean hasEnoughCapacity) { - this.hasEnoughCapacity = hasEnoughCapacity; - } - - public void setSuitableForMigration(Boolean suitableForMigration) { - this.suitableForMigration = suitableForMigration; - } - public void setRequiresStorageMotion(Boolean requiresStorageMotion) { this.requiresStorageMotion = requiresStorageMotion; } - - public String getResourceState() { - return resourceState; - } - - public void setResourceState(String resourceState) { - this.resourceState = resourceState; - } - - public String getCpuWithOverprovisioning() { - return 
cpuWithOverprovisioning; - } - - public void setCpuWithOverprovisioning(String cpuWithOverprovisioning) { - this.cpuWithOverprovisioning = cpuWithOverprovisioning; - } - - public void setMemWithOverprovisioning(String memWithOverprovisioning){ - this.memWithOverprovisioning=memWithOverprovisioning; - } - - public void setHypervisorVersion(String hypervisorVersion) { - this.hypervisorVersion = hypervisorVersion; - } - - public Boolean getHaHost() { - return haHost; - } - - public void setHaHost(Boolean haHost) { - this.haHost = haHost; - } - - public void setMemoryTotal(Long memoryTotal) { - this.memoryTotal = memoryTotal; - } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java index 62bcc07b16d..091d6391b31 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java @@ -186,10 +186,18 @@ public class HostResponse extends BaseResponseWithAnnotations { @Param(description = "the date and time the host was last pinged") private Date lastPinged; - @SerializedName("managementserverid") + @SerializedName(ApiConstants.VIRTUAL_MACHINE_ID) + @Param(description = "the virtual machine id for host type ConsoleProxy and SecondaryStorageVM", since = "4.21.0") + private String virtualMachineId; + + @SerializedName(ApiConstants.MANAGEMENT_SERVER_ID) @Param(description = "the management server ID of the host") private String managementServerId; + @SerializedName(ApiConstants.MANAGEMENT_SERVER_NAME) + @Param(description = "the management server name of the host", since = "4.21.0") + private String managementServerName; + @SerializedName("clusterid") @Param(description = "the cluster ID of the host") private String clusterId; @@ -435,10 +443,18 @@ public class HostResponse extends BaseResponseWithAnnotations { this.lastPinged = lastPinged; } + public void 
setVirtualMachineId(String virtualMachineId) { + this.virtualMachineId = virtualMachineId; + } + public void setManagementServerId(String managementServerId) { this.managementServerId = managementServerId; } + public void setManagementServerName(String managementServerName) { + this.managementServerName = managementServerName; + } + public void setClusterId(String clusterId) { this.clusterId = clusterId; } @@ -723,10 +739,18 @@ public class HostResponse extends BaseResponseWithAnnotations { return lastPinged; } + public String getVirtualMachineId() { + return virtualMachineId; + } + public String getManagementServerId() { return managementServerId; } + public String getManagementServerName() { + return managementServerName; + } + public String getClusterId() { return clusterId; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/LoginCmdResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/LoginCmdResponse.java index 84c79d32321..43f92db84cb 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/LoginCmdResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/LoginCmdResponse.java @@ -86,6 +86,10 @@ public class LoginCmdResponse extends AuthenticationCmdResponse { @Param(description = "Two factor authentication issuer", since = "4.18.0.0") private String issuerFor2FA; + @SerializedName(value = ApiConstants.MANAGEMENT_SERVER_ID) + @Param(description = "Management Server ID that the user logged to", since = "4.21.0.0") + private String managementServerId; + public String getUsername() { return username; } @@ -211,4 +215,12 @@ public class LoginCmdResponse extends AuthenticationCmdResponse { public void setIssuerFor2FA(String issuerFor2FA) { this.issuerFor2FA = issuerFor2FA; } + + public String getManagementServerId() { + return managementServerId; + } + + public void setManagementServerId(String managementServerId) { + this.managementServerId = managementServerId; + } } diff --git 
a/api/src/main/java/org/apache/cloudstack/api/response/ManagementServerResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ManagementServerResponse.java index fc7d3b722ab..df55a63a060 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ManagementServerResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ManagementServerResponse.java @@ -82,6 +82,14 @@ public class ManagementServerResponse extends BaseResponse { @Param(description = "the Management Server Peers") private List peers; + @SerializedName(ApiConstants.AGENTS_COUNT) + @Param(description = "the number of host agents this Management Server is responsible for", since = "4.21.0.0") + private Long agentsCount; + + @SerializedName(ApiConstants.PENDING_JOBS_COUNT) + @Param(description = "the number of pending jobs in this Management Server", since = "4.21.0.0") + private Long pendingJobsCount; + public String getId() { return this.id; } @@ -126,6 +134,14 @@ public class ManagementServerResponse extends BaseResponse { return serviceIp; } + public Long getAgentsCount() { + return this.agentsCount; + } + + public Long getPendingJobsCount() { + return this.pendingJobsCount; + } + public void setId(String id) { this.id = id; } @@ -174,6 +190,14 @@ public class ManagementServerResponse extends BaseResponse { this.serviceIp = serviceIp; } + public void setAgentsCount(Long agentsCount) { + this.agentsCount = agentsCount; + } + + public void setPendingJobsCount(Long pendingJobsCount) { + this.pendingJobsCount = pendingJobsCount; + } + public String getKernelVersion() { return kernelVersion; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/NetworkResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/NetworkResponse.java index a80317c83cd..db811ffbe2d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/NetworkResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/NetworkResponse.java @@ -196,6 +196,10 @@ 
public class NetworkResponse extends BaseResponseWithAssociatedNetwork implement @Param(description = "true network requires restart") private Boolean restartRequired; + @SerializedName(ApiConstants.SPECIFY_VLAN) + @Param(description = "true if network supports specifying vlan, false otherwise") + private Boolean specifyVlan; + @SerializedName(ApiConstants.SPECIFY_IP_RANGES) @Param(description = "true if network supports specifying ip ranges, false otherwise") private Boolean specifyIpRanges; @@ -516,6 +520,10 @@ public class NetworkResponse extends BaseResponseWithAssociatedNetwork implement this.restartRequired = restartRequired; } + public void setSpecifyVlan(Boolean specifyVlan) { + this.specifyVlan = specifyVlan; + } + public void setSpecifyIpRanges(Boolean specifyIpRanges) { this.specifyIpRanges = specifyIpRanges; } diff --git a/api/src/main/java/org/apache/cloudstack/management/ManagementServerHost.java b/api/src/main/java/org/apache/cloudstack/management/ManagementServerHost.java index 54a53f39578..7f81523dab7 100644 --- a/api/src/main/java/org/apache/cloudstack/management/ManagementServerHost.java +++ b/api/src/main/java/org/apache/cloudstack/management/ManagementServerHost.java @@ -22,7 +22,7 @@ import org.apache.cloudstack.api.InternalIdentity; public interface ManagementServerHost extends InternalIdentity, Identity, ControlledEntity { enum State { - Up, Down, PreparingToShutDown, ReadyToShutDown, ShuttingDown + Up, Down, PreparingForMaintenance, Maintenance, PreparingForShutDown, ReadyToShutDown, ShuttingDown } long getMsid(); diff --git a/api/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementService.java b/api/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementService.java index d670e4d3a88..4f6f1ad66c9 100644 --- a/api/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementService.java +++ b/api/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementService.java 
@@ -39,7 +39,7 @@ public interface OutOfBandManagementService { long getId(); boolean isOutOfBandManagementEnabled(Host host); void submitBackgroundPowerSyncTask(Host host); - boolean transitionPowerStateToDisabled(List hosts); + boolean transitionPowerStateToDisabled(List hostIds); OutOfBandManagementResponse enableOutOfBandManagement(DataCenter zone); OutOfBandManagementResponse enableOutOfBandManagement(Cluster cluster); diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmdTest.java index 3c9d4cb67ae..45f175e9a81 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmdTest.java @@ -18,6 +18,7 @@ package org.apache.cloudstack.api.command.admin.domain; import java.util.List; +import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.response.DomainResponse; import org.junit.Assert; import org.junit.Test; @@ -71,7 +72,17 @@ public class ListDomainsCmdTest { cmd._resourceLimitService = resourceLimitService; ReflectionTestUtils.setField(cmd, "tag", "abc"); cmd.updateDomainResponse(List.of(Mockito.mock(DomainResponse.class))); - Mockito.verify(resourceLimitService, Mockito.times(1)).updateTaggedResourceLimitsAndCountsForDomains(Mockito.any(), Mockito.any()); + Mockito.verify(resourceLimitService).updateTaggedResourceLimitsAndCountsForDomains(Mockito.any(), Mockito.any()); + } + + @Test + public void testUpdateDomainResponseWithDomainsMinDetails() { + ListDomainsCmd cmd = new ListDomainsCmd(); + ReflectionTestUtils.setField(cmd, "viewDetails", List.of(ApiConstants.DomainDetails.min.toString())); + cmd._resourceLimitService = resourceLimitService; + ReflectionTestUtils.setField(cmd, "tag", "abc"); + cmd.updateDomainResponse(List.of(Mockito.mock(DomainResponse.class))); + 
Mockito.verify(resourceLimitService, Mockito.never()).updateTaggedResourceLimitsAndCountsForDomains(Mockito.any(), Mockito.any()); } } diff --git a/api/src/test/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmdTest.java index 896a7a6c826..a1ba9270345 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmdTest.java @@ -18,6 +18,7 @@ package org.apache.cloudstack.api.command.user.account; import java.util.List; +import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.response.AccountResponse; import org.junit.Assert; import org.junit.Test; @@ -58,7 +59,7 @@ public class ListAccountsCmdTest { } @Test - public void testUpdateDomainResponseNoDomains() { + public void testUpdateAccountResponseNoAccounts() { ListAccountsCmd cmd = new ListAccountsCmd(); cmd._resourceLimitService = resourceLimitService; cmd.updateAccountResponse(null); @@ -66,11 +67,21 @@ public class ListAccountsCmdTest { } @Test - public void testUpdateDomainResponseWithDomains() { + public void testUpdateDomainResponseWithAccounts() { ListAccountsCmd cmd = new ListAccountsCmd(); cmd._resourceLimitService = resourceLimitService; ReflectionTestUtils.setField(cmd, "tag", "abc"); cmd.updateAccountResponse(List.of(Mockito.mock(AccountResponse.class))); Mockito.verify(resourceLimitService, Mockito.times(1)).updateTaggedResourceLimitsAndCountsForAccounts(Mockito.any(), Mockito.any()); } + + @Test + public void testUpdateDomainResponseWithAccountsMinDetails() { + ListAccountsCmd cmd = new ListAccountsCmd(); + ReflectionTestUtils.setField(cmd, "viewDetails", List.of(ApiConstants.DomainDetails.min.toString())); + cmd._resourceLimitService = resourceLimitService; + ReflectionTestUtils.setField(cmd, "tag", "abc"); + 
cmd.updateAccountResponse(List.of(Mockito.mock(AccountResponse.class))); + Mockito.verify(resourceLimitService, Mockito.never()).updateTaggedResourceLimitsAndCountsForAccounts(Mockito.any(), Mockito.any()); + } } diff --git a/api/src/test/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmdTest.java new file mode 100644 index 00000000000..c905974b2be --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmdTest.java @@ -0,0 +1,91 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.user.firewall; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import org.apache.commons.collections.CollectionUtils; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.utils.net.NetUtils; + +@RunWith(MockitoJUnitRunner.class) +public class CreateFirewallRuleCmdTest { + + private void validateAllIp4Cidr(final CreateFirewallRuleCmd cmd) { + Assert.assertTrue(CollectionUtils.isNotEmpty(cmd.getSourceCidrList())); + Assert.assertEquals(1, cmd.getSourceCidrList().size()); + Assert.assertEquals(NetUtils.ALL_IP4_CIDRS, cmd.getSourceCidrList().get(0)); + } + + @Test + public void testGetSourceCidrList_Null() { + final CreateFirewallRuleCmd cmd = new CreateFirewallRuleCmd(); + ReflectionTestUtils.setField(cmd, "cidrlist", null); + validateAllIp4Cidr(cmd); + } + + @Test + public void testGetSourceCidrList_Empty() { + final CreateFirewallRuleCmd cmd = new CreateFirewallRuleCmd(); + ReflectionTestUtils.setField(cmd, "cidrlist", new ArrayList<>()); + validateAllIp4Cidr(cmd); + } + + @Test + public void testGetSourceCidrList_NullFirstElement() { + final CreateFirewallRuleCmd cmd = new CreateFirewallRuleCmd(); + List list = new ArrayList<>(); + list.add(null); + ReflectionTestUtils.setField(cmd, "cidrlist", list); + validateAllIp4Cidr(cmd); + } + + @Test + public void testGetSourceCidrList_EmptyFirstElement() { + final CreateFirewallRuleCmd cmd = new CreateFirewallRuleCmd(); + ReflectionTestUtils.setField(cmd, "cidrlist", Collections.singletonList(" ")); + validateAllIp4Cidr(cmd); + } + + @Test + public void testGetSourceCidrList_Valid() { + final CreateFirewallRuleCmd cmd = new CreateFirewallRuleCmd(); + String cidr = "10.1.1.1/22"; + ReflectionTestUtils.setField(cmd, "cidrlist", Collections.singletonList(cidr)); + 
Assert.assertTrue(CollectionUtils.isNotEmpty(cmd.getSourceCidrList())); + Assert.assertEquals(1, cmd.getSourceCidrList().size()); + Assert.assertEquals(cidr, cmd.getSourceCidrList().get(0)); + } + + @Test + public void testGetSourceCidrList_EmptyFirstElementButMore() { + final CreateFirewallRuleCmd cmd = new CreateFirewallRuleCmd(); + String cidr = "10.1.1.1/22"; + ReflectionTestUtils.setField(cmd, "cidrlist", Arrays.asList(" ", cidr)); + Assert.assertTrue(CollectionUtils.isNotEmpty(cmd.getSourceCidrList())); + Assert.assertEquals(2, cmd.getSourceCidrList().size()); + Assert.assertEquals(cidr, cmd.getSourceCidrList().get(1)); + } +} diff --git a/client/conf/server.properties.in b/client/conf/server.properties.in index 57d81c81217..0a6078048d3 100644 --- a/client/conf/server.properties.in +++ b/client/conf/server.properties.in @@ -32,6 +32,9 @@ session.timeout=30 # Max allowed API request payload/content size in bytes request.content.size=1048576 +# Max allowed API request form keys +request.max.form.keys=5000 + # Options to configure and enable HTTPS on the management server # # For the management server to pick up these configuration settings, the configured diff --git a/client/pom.xml b/client/pom.xml index 2ef6c910509..e12e0395482 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -624,7 +624,7 @@ org.apache.cloudstack - cloud-plugin-shutdown + cloud-plugin-maintenance ${project.version} diff --git a/client/src/main/java/org/apache/cloudstack/ServerDaemon.java b/client/src/main/java/org/apache/cloudstack/ServerDaemon.java index d2e4483835e..c6fd2ff24dc 100644 --- a/client/src/main/java/org/apache/cloudstack/ServerDaemon.java +++ b/client/src/main/java/org/apache/cloudstack/ServerDaemon.java @@ -82,6 +82,8 @@ public class ServerDaemon implements Daemon { private static final String ACCESS_LOG = "access.log"; private static final String REQUEST_CONTENT_SIZE_KEY = "request.content.size"; private static final int DEFAULT_REQUEST_CONTENT_SIZE = 1048576; + private 
static final String REQUEST_MAX_FORM_KEYS_KEY = "request.max.form.keys"; + private static final int DEFAULT_REQUEST_MAX_FORM_KEYS = 5000; //////////////////////////////////////////////////////// /////////////// Server Configuration /////////////////// @@ -94,6 +96,7 @@ public class ServerDaemon implements Daemon { private int httpsPort = 8443; private int sessionTimeout = 30; private int maxFormContentSize = DEFAULT_REQUEST_CONTENT_SIZE; + private int maxFormKeys = DEFAULT_REQUEST_MAX_FORM_KEYS; private boolean httpsEnable = false; private String accessLogFile = "access.log"; private String bindInterface = null; @@ -141,6 +144,7 @@ public class ServerDaemon implements Daemon { setAccessLogFile(properties.getProperty(ACCESS_LOG, "access.log")); setSessionTimeout(Integer.valueOf(properties.getProperty(SESSION_TIMEOUT, "30"))); setMaxFormContentSize(Integer.valueOf(properties.getProperty(REQUEST_CONTENT_SIZE_KEY, String.valueOf(DEFAULT_REQUEST_CONTENT_SIZE)))); + setMaxFormKeys(Integer.valueOf(properties.getProperty(REQUEST_MAX_FORM_KEYS_KEY, String.valueOf(DEFAULT_REQUEST_MAX_FORM_KEYS)))); } catch (final IOException e) { logger.warn("Failed to read configuration from server.properties file", e); } finally { @@ -192,6 +196,7 @@ public class ServerDaemon implements Daemon { // Extra config options server.setStopAtShutdown(true); server.setAttribute(ContextHandler.MAX_FORM_CONTENT_SIZE_KEY, maxFormContentSize); + server.setAttribute(ContextHandler.MAX_FORM_KEYS_KEY, maxFormKeys); // HTTPS Connector createHttpsConnector(httpConfig); @@ -264,6 +269,7 @@ public class ServerDaemon implements Daemon { webApp.setContextPath(contextPath); webApp.setInitParameter("org.eclipse.jetty.servlet.Default.dirAllowed", "false"); webApp.setMaxFormContentSize(maxFormContentSize); + webApp.setMaxFormKeys(maxFormKeys); // GZIP handler final GzipHandler gzipHandler = new GzipHandler(); @@ -366,4 +372,8 @@ public class ServerDaemon implements Daemon { public void setMaxFormContentSize(int 
maxFormContentSize) { this.maxFormContentSize = maxFormContentSize; } + + public void setMaxFormKeys(int maxFormKeys) { + this.maxFormKeys = maxFormKeys; + } } diff --git a/core/src/main/java/com/cloud/agent/api/MigrateAgentConnectionAnswer.java b/core/src/main/java/com/cloud/agent/api/MigrateAgentConnectionAnswer.java new file mode 100644 index 00000000000..33d32c7f6cc --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/MigrateAgentConnectionAnswer.java @@ -0,0 +1,38 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.agent.api; + +public class MigrateAgentConnectionAnswer extends Answer { + public MigrateAgentConnectionAnswer() { + } + + public MigrateAgentConnectionAnswer(boolean result) { + this.result = result; + } + + public MigrateAgentConnectionAnswer(String details) { + this.result = false; + this.details = details; + } + + public MigrateAgentConnectionAnswer(MigrateAgentConnectionCommand cmd, boolean result) { + super(cmd, result, null); + } +} diff --git a/core/src/main/java/com/cloud/agent/api/MigrateAgentConnectionCommand.java b/core/src/main/java/com/cloud/agent/api/MigrateAgentConnectionCommand.java new file mode 100644 index 00000000000..9471a68669f --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/MigrateAgentConnectionCommand.java @@ -0,0 +1,61 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.agent.api; + +import java.util.List; + +public class MigrateAgentConnectionCommand extends Command { + private List msList; + private List avoidMsList; + private String lbAlgorithm; + private Long lbCheckInterval; + + public MigrateAgentConnectionCommand() { + } + + public MigrateAgentConnectionCommand(final List msList, final List avoidMsList, final String lbAlgorithm, final Long lbCheckInterval) { + super(); + this.msList = msList; + this.avoidMsList = avoidMsList; + this.lbAlgorithm = lbAlgorithm; + this.lbCheckInterval = lbCheckInterval; + } + + public List getMsList() { + return msList; + } + + public List getAvoidMsList() { + return avoidMsList; + } + + public String getLbAlgorithm() { + return lbAlgorithm; + } + + public Long getLbCheckInterval() { + return lbCheckInterval; + } + + @Override + public boolean executeInSequence() { + return false; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/StartupCommand.java b/core/src/main/java/com/cloud/agent/api/StartupCommand.java index cca5e16b585..7a18ba2dccc 100644 --- a/core/src/main/java/com/cloud/agent/api/StartupCommand.java +++ b/core/src/main/java/com/cloud/agent/api/StartupCommand.java @@ -47,6 +47,7 @@ public class StartupCommand extends Command { String resourceName; String gatewayIpAddress; String msHostList; + boolean connectionTransferred; String arch; public StartupCommand(Host.Type type) { @@ -291,6 +292,14 @@ public class StartupCommand extends Command { this.msHostList = msHostList; } + public boolean isConnectionTransferred() { + return connectionTransferred; + } + + public void setConnectionTransferred(boolean connectionTransferred) { + this.connectionTransferred = connectionTransferred; + } + public String getArch() { return arch; } diff --git a/core/src/main/java/com/cloud/agent/api/TransferAgentCommand.java b/core/src/main/java/com/cloud/agent/api/TransferAgentCommand.java index ab74d9bcf85..9c6b3b5fc59 100644 --- 
a/core/src/main/java/com/cloud/agent/api/TransferAgentCommand.java +++ b/core/src/main/java/com/cloud/agent/api/TransferAgentCommand.java @@ -25,6 +25,7 @@ public class TransferAgentCommand extends Command { protected long agentId; protected long futureOwner; protected long currentOwner; + protected boolean isConnectionTransfer; Event event; protected TransferAgentCommand() { @@ -37,6 +38,11 @@ public class TransferAgentCommand extends Command { this.event = event; } + public TransferAgentCommand(long agentId, long currentOwner, long futureOwner, Event event, boolean isConnectionTransfer) { + this(agentId, currentOwner, futureOwner, event); + this.isConnectionTransfer = isConnectionTransfer; + } + public long getAgentId() { return agentId; } @@ -53,6 +59,10 @@ public class TransferAgentCommand extends Command { return currentOwner; } + public boolean isConnectionTransfer() { + return isConnectionTransfer; + } + @Override public boolean executeInSequence() { return false; diff --git a/core/src/main/java/com/cloud/resource/ServerResource.java b/core/src/main/java/com/cloud/resource/ServerResource.java index 1602a78d9a4..845ac8a48fa 100644 --- a/core/src/main/java/com/cloud/resource/ServerResource.java +++ b/core/src/main/java/com/cloud/resource/ServerResource.java @@ -50,6 +50,10 @@ public interface ServerResource extends Manager { */ StartupCommand[] initialize(); + default StartupCommand[] initialize(boolean isTransferredConnection) { + return initialize(); + } + /** * @param id id of the server to put in the PingCommand * @return PingCommand @@ -78,4 +82,12 @@ public interface ServerResource extends Manager { void setAgentControl(IAgentControl agentControl); + default boolean isExitOnFailures() { + return true; + } + + default boolean isAppendAgentNameToLogs() { + return false; + } + } diff --git a/core/src/test/java/org/apache/cloudstack/api/agent/test/CheckOnHostCommandTest.java 
b/core/src/test/java/org/apache/cloudstack/api/agent/test/CheckOnHostCommandTest.java index 287769d6a76..be7563be045 100644 --- a/core/src/test/java/org/apache/cloudstack/api/agent/test/CheckOnHostCommandTest.java +++ b/core/src/test/java/org/apache/cloudstack/api/agent/test/CheckOnHostCommandTest.java @@ -189,6 +189,11 @@ public class CheckOnHostCommandTest { return 2L; }; + @Override + public Long getLastManagementServerId() { + return null; + }; + @Override public Date getRemoved() { Date date = null; diff --git a/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java b/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java index e8ffd86ac4f..94c73d8f4d6 100644 --- a/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java +++ b/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java @@ -22,7 +22,6 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import com.cloud.exception.ResourceAllocationException; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.ConfigKey; @@ -38,6 +37,7 @@ import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InsufficientServerCapacityException; import com.cloud.exception.OperationTimedoutException; +import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.host.Host; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -101,6 +101,10 @@ public interface VirtualMachineManager extends Manager { "refer documentation", true, ConfigKey.Scope.Zone); + ConfigKey VmSyncPowerStateTransitioning = new ConfigKey<>("Advanced", Boolean.class, "vm.sync.power.state.transitioning", "true", + "Whether to sync power states of the transitioning and stalled VMs while processing VM power reports.", false); + + interface Topics { String VM_POWER_STATE = "vm.powerstate"; } @@ -286,24 
+290,22 @@ public interface VirtualMachineManager extends Manager { /** * Obtains statistics for a list of VMs; CPU and network utilization - * @param hostId ID of the host - * @param hostName name of the host + * @param host host * @param vmIds list of VM IDs * @return map of VM ID and stats entry for the VM */ - HashMap getVirtualMachineStatistics(long hostId, String hostName, List vmIds); + HashMap getVirtualMachineStatistics(Host host, List vmIds); /** * Obtains statistics for a list of VMs; CPU and network utilization - * @param hostId ID of the host - * @param hostName name of the host - * @param vmMap map of VM IDs and the corresponding VirtualMachine object + * @param host host + * @param vmMap map of VM instanceName and its ID * @return map of VM ID and stats entry for the VM */ - HashMap getVirtualMachineStatistics(long hostId, String hostName, Map vmMap); + HashMap getVirtualMachineStatistics(Host host, Map vmMap); - HashMap> getVmDiskStatistics(long hostId, String hostName, Map vmMap); + HashMap> getVmDiskStatistics(Host host, Map vmInstanceNameIdMap); - HashMap> getVmNetworkStatistics(long hostId, String hostName, Map vmMap); + HashMap> getVmNetworkStatistics(Host host, Map vmInstanceNameIdMap); Map getDiskOfferingSuitabilityForVm(long vmId, List diskOfferingIds); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/NetworkOrchestrationService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/NetworkOrchestrationService.java index d8e97f0277b..8463d9cee98 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/NetworkOrchestrationService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/NetworkOrchestrationService.java @@ -82,6 +82,9 @@ public interface NetworkOrchestrationService { ConfigKey NetworkLockTimeout = new ConfigKey(Integer.class, NetworkLockTimeoutCK, "Network", "600", "Lock wait timeout (seconds) while 
implementing network", true, Scope.Global, null); + ConfigKey DeniedRoutes = new ConfigKey(String.class, "denied.routes", "Network", "", + "Routes that are denied, can not be used for Static Routes creation for the VPC Private Gateway", true, ConfigKey.Scope.Zone, null); + ConfigKey GuestDomainSuffix = new ConfigKey(String.class, GuestDomainSuffixCK, "Network", "cloud.internal", "Default domain name for vms inside virtualized networks fronted by router", true, ConfigKey.Scope.Zone, null); diff --git a/engine/components-api/src/main/java/com/cloud/agent/AgentManager.java b/engine/components-api/src/main/java/com/cloud/agent/AgentManager.java index 81525ca13f1..82e2d29f407 100644 --- a/engine/components-api/src/main/java/com/cloud/agent/AgentManager.java +++ b/engine/components-api/src/main/java/com/cloud/agent/AgentManager.java @@ -16,6 +16,7 @@ // under the License. package com.cloud.agent; +import java.util.List; import java.util.Map; import org.apache.cloudstack.framework.config.ConfigKey; @@ -170,4 +171,10 @@ public interface AgentManager { void notifyMonitorsOfRemovedHost(long hostId, long clusterId); void propagateChangeToAgents(Map params); + + boolean transferDirectAgentsFromMS(String fromMsUuid, long fromMsId, long timeoutDurationInMs); + + List getLastAgents(); + + void setLastAgents(List lastAgents); } diff --git a/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java b/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java index cbd137e8682..c3d45b98b00 100644 --- a/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java +++ b/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java @@ -16,14 +16,11 @@ // under the License. 
package com.cloud.capacity; -import java.util.Map; - import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import com.cloud.host.Host; import com.cloud.offering.ServiceOffering; -import com.cloud.service.ServiceOfferingVO; import com.cloud.storage.VMTemplateVO; import com.cloud.utils.Pair; import com.cloud.vm.VirtualMachine; @@ -130,6 +127,10 @@ public interface CapacityManager { true, ConfigKey.Scope.Zone); + ConfigKey CapacityCalculateWorkers = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, Integer.class, + "capacity.calculate.workers", "1", + "Number of worker threads to be used for capacities calculation", true); + public boolean releaseVmCapacity(VirtualMachine vm, boolean moveFromReserved, boolean moveToReservered, Long hostId); void allocateVmCapacity(VirtualMachine vm, boolean fromLastHost); @@ -145,8 +146,6 @@ public interface CapacityManager { void updateCapacityForHost(Host host); - void updateCapacityForHost(Host host, Map offeringsMap); - /** * @param pool storage pool * @param templateForVmCreation template that will be used for vm creation @@ -163,12 +162,12 @@ public interface CapacityManager { /** * Check if specified host has capability to support cpu cores and speed freq - * @param hostId the host to be checked + * @param host the host to be checked * @param cpuNum cpu number to check * @param cpuSpeed cpu Speed to check * @return true if the count of host's running VMs >= hypervisor limit */ - boolean checkIfHostHasCpuCapability(long hostId, Integer cpuNum, Integer cpuSpeed); + boolean checkIfHostHasCpuCapability(Host host, Integer cpuNum, Integer cpuSpeed); /** * Check if cluster will cross threshold if the cpu/memory requested are accommodated diff --git a/engine/components-api/src/main/java/com/cloud/ha/HighAvailabilityManager.java b/engine/components-api/src/main/java/com/cloud/ha/HighAvailabilityManager.java index 728f5a2b180..ddc8153d739 100644 --- 
a/engine/components-api/src/main/java/com/cloud/ha/HighAvailabilityManager.java +++ b/engine/components-api/src/main/java/com/cloud/ha/HighAvailabilityManager.java @@ -84,6 +84,13 @@ public interface HighAvailabilityManager extends Manager { HA; // Restart a VM. } + enum ReasonType { + Unknown, + HostMaintenance, + HostDown, + HostDegraded; + } + enum Step { Scheduled, Investigating, Fencing, Stopping, Restarting, Migrating, Cancelled, Done, Error, } @@ -92,7 +99,7 @@ public interface HighAvailabilityManager extends Manager { * Investigate why a host has disconnected and migrate the VMs on it * if necessary. * - * @param host - the host that has disconnected. + * @param hostId - the id of the host that has disconnected. */ Status investigate(long hostId); @@ -109,17 +116,19 @@ public interface HighAvailabilityManager extends Manager { * @param investigate must be investigated before we do anything with this vm. */ void scheduleRestart(VMInstanceVO vm, boolean investigate); + void scheduleRestart(VMInstanceVO vm, boolean investigate, ReasonType reasonType); void cancelDestroy(VMInstanceVO vm, Long hostId); - boolean scheduleDestroy(VMInstanceVO vm, long hostId); + boolean scheduleDestroy(VMInstanceVO vm, long hostId, ReasonType reasonType); /** * Schedule restarts for all vms running on the host. * @param host host. - * @param investigate TODO + * @param investigate whether to investigate + * @param reasonType reason for HA work */ - void scheduleRestartForVmsOnHost(HostVO host, boolean investigate); + void scheduleRestartForVmsOnHost(HostVO host, boolean investigate, ReasonType reasonType); /** * Schedule the vm for migration. @@ -128,6 +137,7 @@ public interface HighAvailabilityManager extends Manager { * @return true if schedule worked. 
*/ boolean scheduleMigration(VMInstanceVO vm); + boolean scheduleMigration(VMInstanceVO vm, ReasonType reasonType); List findTakenMigrationWork(); @@ -140,10 +150,11 @@ public interface HighAvailabilityManager extends Manager { * 3. Check if a VM has been stopped: WorkType.CheckStop * * @param vm virtual machine to stop. - * @param host host the virtual machine is on. + * @param hostId the id of the host the virtual machine is on. * @param type which type of stop is requested. */ boolean scheduleStop(VMInstanceVO vm, long hostId, WorkType type); + boolean scheduleStop(VMInstanceVO vm, long hostId, WorkType type, ReasonType reasonType); void cancelScheduledMigrations(HostVO host); diff --git a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java index 343ad0fa212..3e65ddf78e2 100755 --- a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java +++ b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java @@ -85,6 +85,8 @@ public interface ResourceManager extends ResourceService, Configurable { public Host createHostAndAgent(Long hostId, ServerResource resource, Map details, boolean old, List hostTags, boolean forRebalance); + public Host createHostAndAgent(Long hostId, ServerResource resource, Map details, boolean old, List hostTags, boolean forRebalance, boolean isTransferredConnection); + public Host addHost(long zoneId, ServerResource resource, Type hostType, Map hostDetails); public HostVO createHostVOForConnectedAgent(StartupCommand[] cmds); @@ -138,13 +140,13 @@ public interface ResourceManager extends ResourceService, Configurable { public List listAllHostsInOneZoneNotInClusterByHypervisors(List types, long dcId, long clusterId); - public List listAvailHypervisorInZone(Long hostId, Long zoneId); + public List listAvailHypervisorInZone(Long zoneId); public HostVO findHostByGuid(String guid); public HostVO 
findHostByName(String name); - HostStats getHostStatistics(long hostId); + HostStats getHostStatistics(Host host); Long getGuestOSCategoryId(long hostId); diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index 0b9f7bcb7db..7b31ec6a81b 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -22,6 +22,7 @@ import java.util.Map; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; @@ -42,6 +43,7 @@ import com.cloud.offering.DiskOffering; import com.cloud.offering.ServiceOffering; import com.cloud.storage.Storage.ImageFormat; import com.cloud.utils.Pair; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.DiskProfile; import com.cloud.vm.VMInstanceVO; @@ -214,6 +216,10 @@ public interface StorageManager extends StorageService { "when resize a volume upto resize capacity disable threshold (pool.storage.allocated.resize.capacity.disablethreshold)", true, ConfigKey.Scope.Zone); + ConfigKey StoragePoolHostConnectWorkers = new ConfigKey<>("Storage", Integer.class, + "storage.pool.host.connect.workers", "1", + "Number of worker threads to be used to connect hosts to a primary storage", true); + /** * should we execute in sequence not involving any storages? 
* @return tru if commands should execute in sequence @@ -365,6 +371,9 @@ public interface StorageManager extends StorageService { String getStoragePoolMountFailureReason(String error); + void connectHostsToPool(DataStore primaryStore, List hostIds, Scope scope, + boolean handleStorageConflictException, boolean errorOnNoUpHost) throws CloudRuntimeException; + boolean connectHostToSharedPool(Host host, long poolId) throws StorageUnavailableException, StorageConflictException; void disconnectHostFromSharedPool(Host host, StoragePool pool) throws StorageUnavailableException, StorageConflictException; diff --git a/engine/orchestration/pom.xml b/engine/orchestration/pom.xml index bf8ab14c952..437c98dac87 100755 --- a/engine/orchestration/pom.xml +++ b/engine/orchestration/pom.xml @@ -70,7 +70,7 @@ org.apache.cloudstack - cloud-plugin-shutdown + cloud-plugin-maintenance ${project.version} diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java index 63e97519534..765602e42d0 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java @@ -16,8 +16,10 @@ // under the License. 
package com.cloud.agent.manager; +import java.io.IOException; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; +import java.net.SocketAddress; import java.nio.channels.ClosedChannelException; import java.util.ArrayList; import java.util.Arrays; @@ -25,23 +27,20 @@ import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; +import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.configuration.Config; -import com.cloud.org.Cluster; -import com.cloud.utils.NumbersUtil; -import com.cloud.utils.db.GlobalLock; import org.apache.cloudstack.agent.lb.IndirectAgentLB; import org.apache.cloudstack.ca.CAManager; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; @@ -50,11 +49,17 @@ import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.jobs.AsyncJob; import org.apache.cloudstack.framework.jobs.AsyncJobExecutionContext; +import org.apache.cloudstack.maintenance.ManagementServerMaintenanceListener; +import org.apache.cloudstack.maintenance.ManagementServerMaintenanceManager; import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.cloudstack.management.ManagementServerHost; import org.apache.cloudstack.outofbandmanagement.dao.OutOfBandManagementDao; import 
org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.commons.collections.MapUtils; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.BooleanUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.logging.log4j.ThreadContext; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -81,6 +86,9 @@ import com.cloud.agent.api.UnsupportedAnswer; import com.cloud.agent.transport.Request; import com.cloud.agent.transport.Response; import com.cloud.alert.AlertManager; +import com.cloud.cluster.ManagementServerHostVO; +import com.cloud.cluster.dao.ManagementServerHostDao; +import com.cloud.configuration.Config; import com.cloud.configuration.ManagementServiceConfiguration; import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenterVO; @@ -100,15 +108,18 @@ import com.cloud.host.Status.Event; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.HypervisorGuruManager; +import com.cloud.org.Cluster; import com.cloud.resource.Discoverer; import com.cloud.resource.ResourceManager; import com.cloud.resource.ResourceState; import com.cloud.resource.ServerResource; +import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; import com.cloud.utils.db.EntityManager; +import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.TransactionLegacy; @@ -123,26 +134,23 @@ import com.cloud.utils.nio.Link; import com.cloud.utils.nio.NioServer; import com.cloud.utils.nio.Task; import com.cloud.utils.time.InaccurateClock; -import org.apache.commons.lang3.StringUtils; -import org.apache.logging.log4j.ThreadContext; /** * Implementation of the Agent Manager. 
This class controls the connection to the agents. **/ -public class AgentManagerImpl extends ManagerBase implements AgentManager, HandlerFactory, Configurable { +public class AgentManagerImpl extends ManagerBase implements AgentManager, HandlerFactory, ManagementServerMaintenanceListener, Configurable { /** * _agents is a ConcurrentHashMap, but it is used from within a synchronized block. This will be reported by findbugs as JLM_JSR166_UTILCONCURRENT_MONITORENTER. Maybe a * ConcurrentHashMap is not the right thing to use here, but i'm not sure so i leave it alone. */ - protected ConcurrentHashMap _agents = new ConcurrentHashMap(10007); - protected List> _hostMonitors = new ArrayList>(17); - protected List> _cmdMonitors = new ArrayList>(17); - protected List> _creationMonitors = new ArrayList>(17); - protected List _loadingAgents = new ArrayList(); + protected ConcurrentHashMap _agents = new ConcurrentHashMap<>(10007); + protected List> _hostMonitors = new ArrayList<>(17); + protected List> _cmdMonitors = new ArrayList<>(17); + protected List> _creationMonitors = new ArrayList<>(17); + protected List _loadingAgents = new ArrayList<>(); protected Map _commandTimeouts = new HashMap<>(); private int _monitorId = 0; - private final Lock _agentStatusLock = new ReentrantLock(); @Inject protected CAManager caService; @@ -153,6 +161,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Inject protected HostDao _hostDao = null; @Inject + private ManagementServerHostDao _mshostDao; + @Inject protected OutOfBandManagementDao outOfBandManagementDao; @Inject protected DataCenterDao _dcDao = null; @@ -174,6 +184,9 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Inject protected IndirectAgentLB indirectAgentLB; + @Inject + private ManagementServerMaintenanceManager managementServerMaintenanceManager; + protected int _retry = 2; protected long _nodeId = -1; @@ -186,26 +199,39 @@ public class AgentManagerImpl 
extends ManagerBase implements AgentManager, Handl private int _directAgentThreadCap; + private List lastAgents = null; + protected StateMachine2 _statusStateMachine = Status.getStateMachine(); - private final ConcurrentHashMap _pingMap = new ConcurrentHashMap(10007); + private final ConcurrentHashMap _pingMap = new ConcurrentHashMap<>(10007); + private int maxConcurrentNewAgentConnections; + private final ConcurrentHashMap newAgentConnections = new ConcurrentHashMap<>(); + protected ScheduledExecutorService newAgentConnectionsMonitor; @Inject ResourceManager _resourceMgr; @Inject ManagementServiceConfiguration mgmtServiceConf; - protected final ConfigKey Workers = new ConfigKey("Advanced", Integer.class, "workers", "5", + protected final ConfigKey Workers = new ConfigKey<>("Advanced", Integer.class, "workers", "5", "Number of worker threads handling remote agent connections.", false); - protected final ConfigKey Port = new ConfigKey("Advanced", Integer.class, "port", "8250", "Port to listen on for remote agent connections.", false); - protected final ConfigKey AlertWait = new ConfigKey("Advanced", Integer.class, "alert.wait", "1800", + protected final ConfigKey Port = new ConfigKey<>("Advanced", Integer.class, "port", "8250", "Port to listen on for remote agent connections.", false); + protected final ConfigKey RemoteAgentSslHandshakeTimeout = new ConfigKey<>("Advanced", + Integer.class, "agent.ssl.handshake.timeout", "30", + "Seconds after which SSL handshake times out during remote agent connections.", false); + protected final ConfigKey RemoteAgentMaxConcurrentNewConnections = new ConfigKey<>("Advanced", + Integer.class, "agent.max.concurrent.new.connections", "0", + "Number of maximum concurrent new connections server allows for remote agents. 
" + + "If set to zero (default value) then no limit will be enforced on concurrent new connections", + false); + protected final ConfigKey AlertWait = new ConfigKey<>("Advanced", Integer.class, "alert.wait", "1800", "Seconds to wait before alerting on a disconnected agent", true); - protected final ConfigKey DirectAgentLoadSize = new ConfigKey("Advanced", Integer.class, "direct.agent.load.size", "16", + protected final ConfigKey DirectAgentLoadSize = new ConfigKey<>("Advanced", Integer.class, "direct.agent.load.size", "16", "The number of direct agents to load each time", false); - protected final ConfigKey DirectAgentPoolSize = new ConfigKey("Advanced", Integer.class, "direct.agent.pool.size", "500", + protected final ConfigKey DirectAgentPoolSize = new ConfigKey<>("Advanced", Integer.class, "direct.agent.pool.size", "500", "Default size for DirectAgentPool", false); - protected final ConfigKey DirectAgentThreadCap = new ConfigKey("Advanced", Float.class, "direct.agent.thread.cap", "1", + protected final ConfigKey DirectAgentThreadCap = new ConfigKey<>("Advanced", Float.class, "direct.agent.thread.cap", "1", "Percentage (as a value between 0 and 1) of direct.agent.pool.size to be used as upper thread cap for a single direct agent to process requests", false); - protected final ConfigKey CheckTxnBeforeSending = new ConfigKey("Developer", Boolean.class, "check.txn.before.sending.agent.commands", "false", + protected final ConfigKey CheckTxnBeforeSending = new ConfigKey<>("Developer", Boolean.class, "check.txn.before.sending.agent.commands", "false", "This parameter allows developers to enable a check to see if a transaction wraps commands that are sent to the resource. 
This is not to be enabled on production systems.", true); @Override @@ -213,8 +239,6 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl logger.info("Ping Timeout is {}.", mgmtServiceConf.getPingTimeout()); - final int threads = DirectAgentLoadSize.value(); - _nodeId = ManagementServerNode.getManagementServerId(); logger.info("Configuring AgentManagerImpl. management server node id(msid): {}.", _nodeId); @@ -225,24 +249,34 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl registerForHostEvents(new SetHostParamsListener(), true, true, false); - _executor = new ThreadPoolExecutor(threads, threads, 60l, TimeUnit.SECONDS, new LinkedBlockingQueue(), new NamedThreadFactory("AgentTaskPool")); + managementServerMaintenanceManager.registerListener(this); - _connectExecutor = new ThreadPoolExecutor(100, 500, 60l, TimeUnit.SECONDS, new LinkedBlockingQueue(), new NamedThreadFactory("AgentConnectTaskPool")); + final int agentTaskThreads = DirectAgentLoadSize.value(); + + _executor = new ThreadPoolExecutor(agentTaskThreads, agentTaskThreads, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), new NamedThreadFactory("AgentTaskPool")); + + _connectExecutor = new ThreadPoolExecutor(100, 500, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), new NamedThreadFactory("AgentConnectTaskPool")); // allow core threads to time out even when there are no items in the queue _connectExecutor.allowCoreThreadTimeOut(true); - _connection = new NioServer("AgentManager", Port.value(), Workers.value() + 10, this, caService); + maxConcurrentNewAgentConnections = RemoteAgentMaxConcurrentNewConnections.value(); + + _connection = new NioServer("AgentManager", Port.value(), Workers.value() + 10, + this, caService, RemoteAgentSslHandshakeTimeout.value()); logger.info("Listening on {} with {} workers.", Port.value(), Workers.value()); + final int directAgentPoolSize = DirectAgentPoolSize.value(); // executes all agent commands other than 
cron and ping - _directAgentExecutor = new ScheduledThreadPoolExecutor(DirectAgentPoolSize.value(), new NamedThreadFactory("DirectAgent")); + _directAgentExecutor = new ScheduledThreadPoolExecutor(directAgentPoolSize, new NamedThreadFactory("DirectAgent")); // executes cron and ping agent commands - _cronJobExecutor = new ScheduledThreadPoolExecutor(DirectAgentPoolSize.value(), new NamedThreadFactory("DirectAgentCronJob")); - logger.debug("Created DirectAgentAttache pool with size: {}.", DirectAgentPoolSize.value()); - _directAgentThreadCap = Math.round(DirectAgentPoolSize.value() * DirectAgentThreadCap.value()) + 1; // add 1 to always make the value > 0 + _cronJobExecutor = new ScheduledThreadPoolExecutor(directAgentPoolSize, new NamedThreadFactory("DirectAgentCronJob")); + logger.debug("Created DirectAgentAttache pool with size: {}.", directAgentPoolSize); + _directAgentThreadCap = Math.round(directAgentPoolSize * DirectAgentThreadCap.value()) + 1; // add 1 to always make the value > 0 _monitorExecutor = new ScheduledThreadPoolExecutor(1, new NamedThreadFactory("AgentMonitor")); + newAgentConnectionsMonitor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("NewAgentConnectionsMonitor")); + initializeCommandTimeouts(); return true; @@ -253,22 +287,44 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl return new AgentHandler(type, link, data); } + @Override + public int getMaxConcurrentNewConnectionsCount() { + return maxConcurrentNewAgentConnections; + } + + @Override + public int getNewConnectionsCount() { + return newAgentConnections.size(); + } + + @Override + public void registerNewConnection(SocketAddress address) { + logger.trace("Adding new agent connection from {}", address.toString()); + newAgentConnections.putIfAbsent(address.toString(), System.currentTimeMillis()); + } + + @Override + public void unregisterNewConnection(SocketAddress address) { + logger.trace("Removing new agent connection for {}", 
address.toString()); + newAgentConnections.remove(address.toString()); + } + @Override public int registerForHostEvents(final Listener listener, final boolean connections, final boolean commands, final boolean priority) { synchronized (_hostMonitors) { _monitorId++; if (connections) { if (priority) { - _hostMonitors.add(0, new Pair(_monitorId, listener)); + _hostMonitors.add(0, new Pair<>(_monitorId, listener)); } else { - _hostMonitors.add(new Pair(_monitorId, listener)); + _hostMonitors.add(new Pair<>(_monitorId, listener)); } } if (commands) { if (priority) { - _cmdMonitors.add(0, new Pair(_monitorId, listener)); + _cmdMonitors.add(0, new Pair<>(_monitorId, listener)); } else { - _cmdMonitors.add(new Pair(_monitorId, listener)); + _cmdMonitors.add(new Pair<>(_monitorId, listener)); } } logger.debug("Registering listener {} with id {}", listener.getClass().getSimpleName(), _monitorId); @@ -281,9 +337,9 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl synchronized (_hostMonitors) { _monitorId++; if (priority) { - _creationMonitors.add(0, new Pair(_monitorId, creator)); + _creationMonitors.add(0, new Pair<>(_monitorId, creator)); } else { - _creationMonitors.add(new Pair(_monitorId, creator)); + _creationMonitors.add(new Pair<>(_monitorId, creator)); } return _monitorId; } @@ -295,8 +351,47 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl _hostMonitors.remove(id); } + @Override + public void onManagementServerMaintenance() { + logger.debug("Management server maintenance enabled"); + _monitorExecutor.shutdownNow(); + if (_connection != null) { + _connection.stop(); + + try { + _connection.cleanUp(); + } catch (final IOException e) { + logger.warn("Fail to clean up old connection", e); + } + } + _connectExecutor.shutdownNow(); + } + + @Override + public void onManagementServerCancelMaintenance() { + logger.debug("Management server maintenance disabled"); + if (_connectExecutor.isShutdown()) { + 
_connectExecutor = new ThreadPoolExecutor(100, 500, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), new NamedThreadFactory("AgentConnectTaskPool")); + _connectExecutor.allowCoreThreadTimeOut(true); + } + + startDirectlyConnectedHosts(true); + if (_connection != null) { + try { + _connection.start(); + } catch (final NioConnectionException e) { + logger.error("Error when connecting to the NioServer!", e); + } + } + + if (_monitorExecutor.isShutdown()) { + _monitorExecutor = new ScheduledThreadPoolExecutor(1, new NamedThreadFactory("AgentMonitor")); + _monitorExecutor.scheduleWithFixedDelay(new MonitorTask(), mgmtServiceConf.getPingInterval(), mgmtServiceConf.getPingInterval(), TimeUnit.SECONDS); + } + } + private AgentControlAnswer handleControlCommand(final AgentAttache attache, final AgentControlCommand cmd) { - AgentControlAnswer answer = null; + AgentControlAnswer answer; for (final Pair listener : _cmdMonitors) { answer = listener.second().processControlCommand(attache.getId(), cmd); @@ -324,13 +419,23 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } public AgentAttache findAttache(final long hostId) { - AgentAttache attache = null; + AgentAttache attache; synchronized (_agents) { attache = _agents.get(hostId); } return attache; } + @Override + public List getLastAgents() { + return lastAgents; + } + + @Override + public void setLastAgents(List lastAgents) { + this.lastAgents = lastAgents; + } + @Override public Answer sendTo(final Long dcId, final HypervisorType type, final Command cmd) { final List clusters = _clusterDao.listByDcHyType(dcId, type.toString()); @@ -366,12 +471,10 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl cmds.addCommand(cmd); send(hostId, cmds, cmd.getWait()); final Answer[] answers = cmds.getAnswers(); - if (answers != null && !(answers[0] instanceof UnsupportedAnswer)) { - return answers[0]; - } - - if (answers != null && answers[0] instanceof 
UnsupportedAnswer) { - logger.warn("Unsupported Command: {}", answers[0].getDetails()); + if (answers != null) { + if (answers[0] instanceof UnsupportedAnswer) { + logger.warn("Unsupported Command: {}", answers[0].getDetails()); + } return answers[0]; } @@ -402,8 +505,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } /** - * @param commands - * @return + * @param commands object container of commands + * @return array of commands */ private Command[] checkForCommandsAndTag(final Commands commands) { final Command[] cmds = commands.toCommands(); @@ -419,8 +522,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } /** - * @param commands - * @param cmds + * @param commands object container of commands + * @param cmds array of commands */ private void setEmptyAnswers(final Commands commands, final Command[] cmds) { if (cmds.length == 0) { @@ -459,7 +562,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl String commandWaits = GranularWaitTimeForCommands.value().trim(); if (StringUtils.isNotEmpty(commandWaits)) { _commandTimeouts = getCommandTimeoutsMap(commandWaits); - logger.info(String.format("Timeouts for management server internal commands successfully initialized from global setting commands.timeout: %s", _commandTimeouts)); + logger.info("Timeouts for management server internal commands successfully initialized from global setting commands.timeout: {}", _commandTimeouts); } } @@ -475,10 +578,10 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl int commandTimeout = Integer.parseInt(parts[1].trim()); commandTimeouts.put(commandName, commandTimeout); } catch (NumberFormatException e) { - logger.error(String.format("Initialising the timeouts using commands.timeout: %s for management server internal commands failed with error %s", commandPair, e.getMessage())); + logger.error("Initialising the timeouts using commands.timeout: {} 
for management server internal commands failed with error {}", commandPair, e.getMessage()); } } else { - logger.error(String.format("Error initialising the timeouts for management server internal commands. Invalid format in commands.timeout: %s", commandPair)); + logger.error("Error initialising the timeouts for management server internal commands. Invalid format in commands.timeout: {}", commandPair); } } return commandTimeouts; @@ -492,7 +595,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } int wait = getTimeout(commands, timeout); - logger.debug(String.format("Wait time setting on %s is %d seconds", commands, wait)); + logger.debug("Wait time setting on {} is {} seconds", commands, wait); for (Command cmd : commands) { String simpleCommandName = cmd.getClass().getSimpleName(); Integer commandTimeout = _commandTimeouts.get(simpleCommandName); @@ -579,7 +682,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } final long hostId = attache.getId(); logger.debug("Remove Agent : {}", attache); - AgentAttache removed = null; + AgentAttache removed; boolean conflict = false; synchronized (_agents) { removed = _agents.remove(hostId); @@ -615,38 +718,32 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl final long hostId = attache.getId(); final HostVO host = _hostDao.findById(hostId); for (final Pair monitor : _hostMonitors) { - logger.debug("Sending Connect to listener: {}", monitor.second().getClass().getSimpleName()); + logger.debug("Sending Connect to listener: {}, for rebalance: {}", monitor.second().getClass().getSimpleName(), forRebalance); for (int i = 0; i < cmd.length; i++) { try { + logger.debug("process connection to issue: {} for host: {}, forRebalance: {}, connection transferred: {}", ReflectionToStringBuilderUtils.reflectCollection(cmd[i]), hostId, forRebalance, cmd[i].isConnectionTransferred()); monitor.second().processConnect(host, cmd[i], 
forRebalance); - } catch (final Exception e) { - if (e instanceof ConnectionException) { - final ConnectionException ce = (ConnectionException)e; - if (ce.isSetupError()) { - logger.warn("Monitor {} says there is an error in the connect process for {} due to {}", - monitor.second().getClass().getSimpleName(), host, e.getMessage()); - handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); - throw ce; - } else { - logger.info("Monitor {} says not to continue the connect process for {} due to {}", - monitor.second().getClass().getSimpleName(), host, e.getMessage()); - handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true); - return attache; - } - } else if (e instanceof HypervisorVersionChangedException) { - handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true); - throw new CloudRuntimeException(String.format("Unable to connect %s", attache), e); - } else { - logger.error("Monitor {} says there is an error in the connect process for {} due to {}", - monitor.second().getClass().getSimpleName(), host, e.getMessage(), e); + } catch (final ConnectionException ce) { + if (ce.isSetupError()) { + logger.warn("Monitor {} says there is an error in the connect process for {} due to {}", monitor.second().getClass().getSimpleName(), hostId, ce.getMessage()); handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); - throw new CloudRuntimeException(String.format("Unable to connect %s", attache), e); + throw ce; + } else { + logger.info("Monitor {} says not to continue the connect process for {} due to {}", monitor.second().getClass().getSimpleName(), hostId, ce.getMessage()); + handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true); + return attache; } + } catch (final HypervisorVersionChangedException hvce) { + handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true); + throw new CloudRuntimeException("Unable to 
connect " + attache.getId(), hvce); + } catch (final Exception e) { + logger.error("Monitor {} says there is an error in the connect process for {} due to {}", monitor.second().getClass().getSimpleName(), hostId, e.getMessage(), e); + handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); + throw new CloudRuntimeException("Unable to connect " + attache.getId(), e); } } } - final Long dcId = host.getDataCenterId(); final ReadyCommand ready = new ReadyCommand(host, NumbersUtil.enableHumanReadableSizes); ready.setWait(ReadyCommandWait.value()); final Answer answer = easySend(hostId, ready); @@ -679,7 +776,13 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override public boolean start() { - startDirectlyConnectedHosts(); + ManagementServerHostVO msHost = _mshostDao.findByMsid(_nodeId); + if (msHost != null && (ManagementServerHost.State.Maintenance.equals(msHost.getState()) || ManagementServerHost.State.PreparingForMaintenance.equals(msHost.getState()))) { + _monitorExecutor.shutdownNow(); + return true; + } + + startDirectlyConnectedHosts(false); if (_connection != null) { try { @@ -691,13 +794,17 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl _monitorExecutor.scheduleWithFixedDelay(new MonitorTask(), mgmtServiceConf.getPingInterval(), mgmtServiceConf.getPingInterval(), TimeUnit.SECONDS); + final int cleanupTime = Wait.value(); + newAgentConnectionsMonitor.scheduleAtFixedRate(new AgentNewConnectionsMonitorTask(), cleanupTime, + cleanupTime, TimeUnit.MINUTES); + return true; } - public void startDirectlyConnectedHosts() { + public void startDirectlyConnectedHosts(final boolean forRebalance) { final List hosts = _resourceMgr.findDirectlyConnectedHosts(); for (final HostVO host : hosts) { - loadDirectlyConnectedHost(host, false); + loadDirectlyConnectedHost(host, forRebalance); } } @@ -709,25 +816,25 @@ public class AgentManagerImpl extends ManagerBase implements 
AgentManager, Handl final Constructor constructor = clazz.getConstructor(); resource = (ServerResource)constructor.newInstance(); } catch (final ClassNotFoundException e) { - logger.warn("Unable to find class " + host.getResource(), e); + logger.warn("Unable to find class {}", host.getResource(), e); } catch (final InstantiationException e) { - logger.warn("Unable to instantiate class " + host.getResource(), e); + logger.warn("Unable to instantiate class {}", host.getResource(), e); } catch (final IllegalAccessException e) { - logger.warn("Illegal access " + host.getResource(), e); + logger.warn("Illegal access {}", host.getResource(), e); } catch (final SecurityException e) { - logger.warn("Security error on " + host.getResource(), e); + logger.warn("Security error on {}", host.getResource(), e); } catch (final NoSuchMethodException e) { - logger.warn("NoSuchMethodException error on " + host.getResource(), e); + logger.warn("NoSuchMethodException error on {}", host.getResource(), e); } catch (final IllegalArgumentException e) { - logger.warn("IllegalArgumentException error on " + host.getResource(), e); + logger.warn("IllegalArgumentException error on {}", host.getResource(), e); } catch (final InvocationTargetException e) { - logger.warn("InvocationTargetException error on " + host.getResource(), e); + logger.warn("InvocationTargetException error on {}", host.getResource(), e); } if (resource != null) { _hostDao.loadDetails(host); - final HashMap params = new HashMap(host.getDetails().size() + 5); + final HashMap params = new HashMap<>(host.getDetails().size() + 5); params.putAll(host.getDetails()); params.put("guid", host.getGuid()); @@ -737,7 +844,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } if (host.getClusterId() != null) { params.put("cluster", Long.toString(host.getClusterId())); - String guid = null; + String guid; final ClusterVO cluster = _clusterDao.findById(host.getClusterId()); if (cluster.getGuid() == null) 
{ guid = host.getDetail("pool"); @@ -772,8 +879,12 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } protected boolean loadDirectlyConnectedHost(final HostVO host, final boolean forRebalance) { + return loadDirectlyConnectedHost(host, forRebalance, false); + } + + protected boolean loadDirectlyConnectedHost(final HostVO host, final boolean forRebalance, final boolean isTransferredConnection) { boolean initialized = false; - ServerResource resource = null; + ServerResource resource; try { // load the respective discoverer final Discoverer discoverer = _resourceMgr.getMatchingDiscover(host.getHypervisorType()); @@ -800,21 +911,21 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl if (forRebalance) { tapLoadingAgents(host.getId(), TapAgentsAction.Add); - final Host h = _resourceMgr.createHostAndAgent(host.getId(), resource, host.getDetails(), false, null, true); + final Host h = _resourceMgr.createHostAndAgent(host.getId(), resource, host.getDetails(), false, null, true, isTransferredConnection); tapLoadingAgents(host.getId(), TapAgentsAction.Del); - return h == null ? 
false : true; + return h != null; } else { _executor.execute(new SimulateStartTask(host.getId(), host.getUuid(), host.getName(), resource, host.getDetails())); return true; } } - protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) throws ConnectionException { + protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) { logger.debug("create DirectAgentAttache for {}", host); final DirectAgentAttache attache = new DirectAgentAttache(this, host.getId(), host.getUuid(), host.getName(), resource, host.isInMaintenanceStates()); - AgentAttache old = null; + AgentAttache old; synchronized (_agents) { old = _agents.put(host.getId(), attache); } @@ -848,6 +959,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl _connectExecutor.shutdownNow(); _monitorExecutor.shutdownNow(); + newAgentConnectionsMonitor.shutdownNow(); return true; } @@ -879,7 +991,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl try { logger.info("Host {} is disconnecting with event {}", attache, event); - Status nextStatus = null; + Status nextStatus; final HostVO host = _hostDao.findById(hostId); if (host == null) { logger.warn("Can't find host with {} ({})", hostId, attache); @@ -993,7 +1105,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl handleDisconnectWithoutInvestigation(attache, event, true, true); host = _hostDao.findById(hostId); // Maybe the host magically reappeared? 
if (host != null && host.getStatus() == Status.Down) { - _haMgr.scheduleRestartForVmsOnHost(host, true); + _haMgr.scheduleRestartForVmsOnHost(host, true, HighAvailabilityManager.ReasonType.HostDown); } return true; } @@ -1012,7 +1124,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override protected void runInContext() { try { - if (_investigate == true) { + if (_investigate) { handleDisconnectWithInvestigation(_attache, _event); } else { handleDisconnectWithoutInvestigation(_attache, _event, true, false); @@ -1064,8 +1176,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl public Answer[] send(final Long hostId, final Commands cmds) throws AgentUnavailableException, OperationTimedoutException { int wait = 0; if (cmds.size() > 1) { - logger.debug(String.format("Checking the wait time in seconds to be used for the following commands : %s. If there are multiple commands sent at once," + - "then max wait time of those will be used", cmds)); + logger.debug("Checking the wait time in seconds to be used for the following commands : {}. 
If there are multiple commands sent at once," + + "then max wait time of those will be used", cmds); } for (final Command cmd : cmds) { @@ -1128,7 +1240,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException { if (event == Event.AgentDisconnected) { - AgentAttache attache = null; + AgentAttache attache; attache = findAttache(hostId); logger.debug("Received agent disconnect event for host {} ({})", hostId, attache); if (attache != null) { @@ -1154,12 +1266,12 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl return agentAttache != null; } - protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) throws ConnectionException { + protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) { logger.debug("create ConnectedAgentAttache for {}", host); final AgentAttache attache = new ConnectedAgentAttache(this, host.getId(), host.getUuid(), host.getName(), link, host.isInMaintenanceStates()); link.attach(attache); - AgentAttache old = null; + AgentAttache old; synchronized (_agents) { old = _agents.put(host.getId(), attache); } @@ -1184,7 +1296,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } ready.setArch(host.getArch().getType()); - AgentAttache attache = null; + AgentAttache attache; GlobalLock joinLock = getHostJoinLock(host.getId()); if (joinLock.lock(60)) { try { @@ -1210,7 +1322,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl return attache; } - private AgentAttache handleConnectedAgent(final Link link, final StartupCommand[] startup, final Request request) { + private AgentAttache handleConnectedAgent(final Link link, final StartupCommand[] startup) { AgentAttache attache = null; ReadyCommand ready = null; try { @@ -1238,7 +1350,7 @@ public class AgentManagerImpl extends 
ManagerBase implements AgentManager, Handl easySend(attache.getId(), ready); } } catch (final Exception e) { - logger.debug("Failed to send ready command:" + e.toString()); + logger.debug("Failed to send ready command:", e); } return attache; } @@ -1264,6 +1376,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl this.id = id; this.resource = resource; this.details = details; + this.uuid = uuid; + this.name = name; } @Override @@ -1312,10 +1426,11 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl startups[i] = (StartupCommand)_cmds[i]; } - final AgentAttache attache = handleConnectedAgent(_link, startups, _request); + final AgentAttache attache = handleConnectedAgent(_link, startups); if (attache == null) { logger.warn("Unable to create attache for agent: {}", _request); } + unregisterNewConnection(_link.getSocketAddress()); } } @@ -1332,7 +1447,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl break; } } - Response response = null; + Response response; response = new Response(request, answers[0], _nodeId, -1); try { link.send(response.toBytes()); @@ -1413,7 +1528,6 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } final long hostId = attache.getId(); - final String hostName = attache.getName(); if (logger.isDebugEnabled()) { if (cmd instanceof PingRoutingCommand) { @@ -1432,7 +1546,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl final Answer[] answers = new Answer[cmds.length]; for (int i = 0; i < cmds.length; i++) { cmd = cmds[i]; - Answer answer = null; + Answer answer; try { if (cmd instanceof StartupRoutingCommand) { final StartupRoutingCommand startup = (StartupRoutingCommand) cmd; @@ -1466,7 +1580,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl final long cmdHostId = ((PingCommand)cmd).getHostId(); boolean requestStartupCommand = false; - final 
HostVO host = _hostDao.findById(Long.valueOf(cmdHostId)); + final HostVO host = _hostDao.findById(cmdHostId); boolean gatewayAccessible = true; // if the router is sending a ping, verify the // gateway was pingable @@ -1516,7 +1630,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl if (logD) { logger.debug("SeqA {}-: Sending {}", attache.getId(), response.getSequence(), response); } else { - logger.trace("SeqA {}-: Sending {}" + attache.getId(), response.getSequence(), response); + logger.trace("SeqA {}-: Sending {} {}", response.getSequence(), response, attache.getId()); } try { link.send(response.toBytes()); @@ -1536,15 +1650,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override protected void doTask(final Task task) throws TaskExecutionException { - final TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); - try { + try (TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB)) { final Type type = task.getType(); - if (type == Task.Type.DATA) { + if (type == Type.DATA) { final byte[] data = task.getData(); try { final Request event = Request.parse(data); if (event instanceof Response) { - processResponse(task.getLink(), (Response)event); + processResponse(task.getLink(), (Response) event); } else { processRequest(task.getLink(), event); } @@ -1556,10 +1669,10 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl logger.error(message); throw new TaskExecutionException(message, e); } - } else if (type == Task.Type.CONNECT) { - } else if (type == Task.Type.DISCONNECT) { + } else if (type == Type.CONNECT) { + } else if (type == Type.DISCONNECT) { final Link link = task.getLink(); - final AgentAttache attache = (AgentAttache)link.attachment(); + final AgentAttache attache = (AgentAttache) link.attachment(); if (attache != null) { disconnectWithInvestigation(attache, Event.AgentDisconnected); } else { @@ -1568,8 +1681,6 
@@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl link.terminated(); } } - } finally { - txn.close(); } } } @@ -1598,21 +1709,16 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override public boolean agentStatusTransitTo(final HostVO host, final Status.Event e, final long msId) { - try { - _agentStatusLock.lock(); - logger.debug("[Resource state = {}, Agent event = , Host = {}]", - host.getResourceState(), e.toString(), host); + logger.debug("[Resource state = {}, Agent event = , Host = {}]", + host.getResourceState(), e.toString(), host); - host.setManagementServerId(msId); - try { - return _statusStateMachine.transitTo(host, e, host.getId(), _hostDao); - } catch (final NoTransitionException e1) { - logger.debug("Cannot transit agent status with event {} for host {}, management server id is {}", e, host, msId); - throw new CloudRuntimeException(String.format( - "Cannot transit agent status with event %s for host %s, management server id is %d, %s", e, host, msId, e1.getMessage())); - } - } finally { - _agentStatusLock.unlock(); + host.setManagementServerId(msId); + try { + return _statusStateMachine.transitTo(host, e, host.getId(), _hostDao); + } catch (final NoTransitionException e1) { + logger.debug("Cannot transit agent status with event {} for host {}, management server id is {}", e, host, msId); + throw new CloudRuntimeException(String.format( + "Cannot transit agent status with event %s for host %s, management server id is %d, %s", e, host, msId, e1.getMessage())); } } @@ -1801,7 +1907,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } protected List findAgentsBehindOnPing() { - final List agentsBehind = new ArrayList(); + final List agentsBehind = new ArrayList<>(); final long cutoffTime = InaccurateClock.getTimeInSeconds() - mgmtServiceConf.getTimeout(); for (final Map.Entry entry : _pingMap.entrySet()) { if (entry.getValue() < cutoffTime) { @@ 
-1809,7 +1915,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } - if (agentsBehind.size() > 0) { + if (!agentsBehind.isEmpty()) { logger.info("Found the following agents behind on ping: {}", agentsBehind); } @@ -1817,6 +1923,35 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } + protected class AgentNewConnectionsMonitorTask extends ManagedContextRunnable { + @Override + protected void runInContext() { + logger.trace("Agent New Connections Monitor is started."); + final int cleanupTime = Wait.value(); + Set> entrySet = newAgentConnections.entrySet(); + long cutOff = System.currentTimeMillis() - (cleanupTime * 60 * 1000L); + if (logger.isDebugEnabled()) { + List expiredConnections = newAgentConnections.entrySet() + .stream() + .filter(e -> e.getValue() <= cutOff) + .map(Map.Entry::getKey) + .collect(Collectors.toList()); + logger.debug("Currently {} active new connections, of which {} have expired - {}", + entrySet.size(), + expiredConnections.size(), + StringUtils.join(expiredConnections)); + } + for (Map.Entry entry : entrySet) { + if (entry.getValue() <= cutOff) { + if (logger.isTraceEnabled()) { + logger.trace("Cleaning up new agent connection for {}", entry.getKey()); + } + newAgentConnections.remove(entry.getKey()); + } + } + } + } + protected class BehindOnPingListener implements Listener { @Override public boolean isRecurring() { @@ -1892,7 +2027,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override public ConfigKey[] getConfigKeys() { return new ConfigKey[] { CheckTxnBeforeSending, Workers, Port, Wait, AlertWait, DirectAgentLoadSize, - DirectAgentPoolSize, DirectAgentThreadCap, EnableKVMAutoEnableDisable, ReadyCommandWait, GranularWaitTimeForCommands }; + DirectAgentPoolSize, DirectAgentThreadCap, EnableKVMAutoEnableDisable, ReadyCommandWait, + GranularWaitTimeForCommands, RemoteAgentSslHandshakeTimeout, 
RemoteAgentMaxConcurrentNewConnections }; } protected class SetHostParamsListener implements Listener { @@ -1922,12 +2058,15 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override public void processConnect(final Host host, final StartupCommand cmd, final boolean forRebalance) { - if (cmd instanceof StartupRoutingCommand) { - if (((StartupRoutingCommand)cmd).getHypervisorType() == HypervisorType.KVM || ((StartupRoutingCommand)cmd).getHypervisorType() == HypervisorType.LXC) { - Map params = new HashMap(); - params.put(Config.RouterAggregationCommandEachTimeout.toString(), _configDao.getValue(Config.RouterAggregationCommandEachTimeout.toString())); - params.put(Config.MigrateWait.toString(), _configDao.getValue(Config.MigrateWait.toString())); - params.put(NetworkOrchestrationService.TUNGSTEN_ENABLED.key(), String.valueOf(NetworkOrchestrationService.TUNGSTEN_ENABLED.valueIn(host.getDataCenterId()))); + if (!(cmd instanceof StartupRoutingCommand) || cmd.isConnectionTransferred()) { + return; + } + + if (((StartupRoutingCommand)cmd).getHypervisorType() == HypervisorType.KVM || ((StartupRoutingCommand)cmd).getHypervisorType() == HypervisorType.LXC) { + Map params = new HashMap<>(); + params.put(Config.RouterAggregationCommandEachTimeout.toString(), _configDao.getValue(Config.RouterAggregationCommandEachTimeout.toString())); + params.put(Config.MigrateWait.toString(), _configDao.getValue(Config.MigrateWait.toString())); + params.put(NetworkOrchestrationService.TUNGSTEN_ENABLED.key(), String.valueOf(NetworkOrchestrationService.TUNGSTEN_ENABLED.valueIn(host.getDataCenterId()))); try { SetHostParamsCommand cmds = new SetHostParamsCommand(params); @@ -1939,8 +2078,6 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } - } - @Override public boolean processDisconnect(final long agentId, final Status state) { return true; @@ -1971,13 +2108,13 @@ public class AgentManagerImpl extends ManagerBase 
implements AgentManager, Handl if (allHosts == null) { return null; } - Map> hostsByZone = new HashMap>(); + Map> hostsByZone = new HashMap<>(); for (HostVO host : allHosts) { if (host.getHypervisorType() == HypervisorType.KVM || host.getHypervisorType() == HypervisorType.LXC) { Long zoneId = host.getDataCenterId(); List hostIds = hostsByZone.get(zoneId); if (hostIds == null) { - hostIds = new ArrayList(); + hostIds = new ArrayList<>(); } hostIds.add(host.getId()); hostsByZone.put(zoneId, hostIds); @@ -2008,6 +2145,11 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } + @Override + public boolean transferDirectAgentsFromMS(String fromMsUuid, long fromMsId, long timeoutDurationInMs) { + return true; + } + private GlobalLock getHostJoinLock(Long hostId) { return GlobalLock.getInternLock(String.format("%s-%s", "Host-Join", hostId)); } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java index be327418205..c667df5412e 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java @@ -47,14 +47,17 @@ import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.ha.dao.HAConfigDao; +import org.apache.cloudstack.maintenance.ManagementServerMaintenanceManager; +import org.apache.cloudstack.maintenance.command.BaseShutdownManagementServerHostCommand; +import org.apache.cloudstack.maintenance.command.CancelMaintenanceManagementServerHostCommand; +import org.apache.cloudstack.maintenance.command.CancelShutdownManagementServerHostCommand; +import 
org.apache.cloudstack.maintenance.command.PrepareForMaintenanceManagementServerHostCommand; +import org.apache.cloudstack.maintenance.command.PrepareForShutdownManagementServerHostCommand; +import org.apache.cloudstack.maintenance.command.TriggerShutdownManagementServerHostCommand; import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.managed.context.ManagedContextTimerTask; +import org.apache.cloudstack.management.ManagementServerHost; import org.apache.cloudstack.outofbandmanagement.dao.OutOfBandManagementDao; -import org.apache.cloudstack.shutdown.ShutdownManager; -import org.apache.cloudstack.shutdown.command.CancelShutdownManagementServerHostCommand; -import org.apache.cloudstack.shutdown.command.PrepareForShutdownManagementServerHostCommand; -import org.apache.cloudstack.shutdown.command.BaseShutdownManagementServerHostCommand; -import org.apache.cloudstack.shutdown.command.TriggerShutdownManagementServerHostCommand; import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.cloudstack.utils.security.SSLUtils; @@ -73,13 +76,15 @@ import com.cloud.cluster.ClusterManager; import com.cloud.cluster.ClusterManagerListener; import com.cloud.cluster.ClusterServicePdu; import com.cloud.cluster.ClusteredAgentRebalanceService; -import org.apache.cloudstack.management.ManagementServerHost; import com.cloud.cluster.ManagementServerHostVO; import com.cloud.cluster.agentlb.AgentLoadBalancerPlanner; import com.cloud.cluster.agentlb.HostTransferMapVO; import com.cloud.cluster.agentlb.HostTransferMapVO.HostTransferState; import com.cloud.cluster.agentlb.dao.HostTransferMapDao; import com.cloud.cluster.dao.ManagementServerHostDao; +import com.cloud.cluster.dao.ManagementServerHostPeerDao; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.DataCenterDao; import com.cloud.exception.AgentUnavailableException; import com.cloud.exception.OperationTimedoutException; import 
com.cloud.exception.UnsupportedVersionException; @@ -100,25 +105,30 @@ import com.cloud.utils.nio.Link; import com.cloud.utils.nio.Task; import com.google.gson.Gson; +import org.apache.commons.collections.CollectionUtils; + public class ClusteredAgentManagerImpl extends AgentManagerImpl implements ClusterManagerListener, ClusteredAgentRebalanceService { - private static final ScheduledExecutorService s_transferExecutor = Executors.newScheduledThreadPool(2, new NamedThreadFactory("Cluster-AgentRebalancingExecutor")); + private static ScheduledExecutorService s_transferExecutor = Executors.newScheduledThreadPool(2, new NamedThreadFactory("Cluster-AgentRebalancingExecutor")); private final long rebalanceTimeOut = 300000; // 5 mins - after this time remove the agent from the transfer list public final static long STARTUP_DELAY = 5000; public final static long SCAN_INTERVAL = 90000; // 90 seconds, it takes 60 sec for xenserver to fail login public final static int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 5; // 5 seconds - protected Set _agentToTransferIds = new HashSet(); + protected Set _agentToTransferIds = new HashSet<>(); Gson _gson; protected HashMap _peers; protected HashMap _sslEngines; private final Timer _timer = new Timer("ClusteredAgentManager Timer"); boolean _agentLbHappened = false; + private int _mshostCounter = 0; @Inject protected ClusterManager _clusterMgr = null; @Inject protected ManagementServerHostDao _mshostDao; @Inject + protected ManagementServerHostPeerDao _mshostPeerDao; + @Inject protected HostTransferMapDao _hostTransferDao; @Inject protected List _lbPlanners; @@ -133,23 +143,25 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Inject private CAManager caService; @Inject - private ShutdownManager shutdownManager; + private ManagementServerMaintenanceManager managementServerMaintenanceManager; + @Inject + private DataCenterDao dcDao; protected ClusteredAgentManagerImpl() { super(); } - protected final 
ConfigKey EnableLB = new ConfigKey(Boolean.class, "agent.lb.enabled", "Advanced", "false", "Enable agent load balancing between management server nodes", true); - protected final ConfigKey ConnectedAgentThreshold = new ConfigKey(Double.class, "agent.load.threshold", "Advanced", "0.7", + protected final ConfigKey EnableLB = new ConfigKey<>(Boolean.class, "agent.lb.enabled", "Advanced", "false", "Enable agent load balancing between management server nodes", true); + protected final ConfigKey ConnectedAgentThreshold = new ConfigKey<>(Double.class, "agent.load.threshold", "Advanced", "0.7", "What percentage of the agents can be held by one management server before load balancing happens", true, EnableLB.key()); - protected final ConfigKey LoadSize = new ConfigKey(Integer.class, "direct.agent.load.size", "Advanced", "16", "How many agents to connect to in each round", true); - protected final ConfigKey ScanInterval = new ConfigKey(Integer.class, "direct.agent.scan.interval", "Advanced", "90", "Interval between scans to load agents", false, + protected final ConfigKey LoadSize = new ConfigKey<>(Integer.class, "direct.agent.load.size", "Advanced", "16", "How many agents to connect to in each round", true); + protected final ConfigKey ScanInterval = new ConfigKey<>(Integer.class, "direct.agent.scan.interval", "Advanced", "90", "Interval between scans to load agents", false, ConfigKey.Scope.Global, 1000); @Override public boolean configure(final String name, final Map xmlParams) throws ConfigurationException { - _peers = new HashMap(7); - _sslEngines = new HashMap(7); + _peers = new HashMap<>(7); + _sslEngines = new HashMap<>(7); _nodeId = ManagementServerNode.getManagementServerId(); logger.info("Configuring ClusterAgentManagerImpl. 
management server node id(msid): {}", _nodeId); @@ -172,6 +184,13 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust _timer.schedule(new DirectAgentScanTimerTask(), STARTUP_DELAY, ScanInterval.value()); logger.debug("Scheduled direct agent scan task to run at an interval of {} seconds", ScanInterval.value()); + ManagementServerHostVO msHost = _mshostDao.findByMsid(_nodeId); + if (msHost != null && (ManagementServerHost.State.Maintenance.equals(msHost.getState()) || ManagementServerHost.State.PreparingForMaintenance.equals(msHost.getState()))) { + s_transferExecutor.shutdownNow(); + cleanupTransferMap(_nodeId); + return true; + } + // Schedule tasks for agent rebalancing if (isAgentRebalanceEnabled()) { cleanupTransferMap(_nodeId); @@ -201,7 +220,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust if (hosts != null) { hosts.addAll(appliances); - if (hosts.size() > 0) { + if (!hosts.isEmpty()) { logger.debug("Found {} unmanaged direct hosts, processing connect for them...", hosts.size()); for (final HostVO host : hosts) { try { @@ -215,12 +234,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust continue; } } - - logger.debug("Loading directly connected host {}", host); + logger.debug("Loading directly connected {}", host); loadDirectlyConnectedHost(host, false); } catch (final Throwable e) { - logger.warn(" can not load directly connected host {}({}) due to ", - host, e); + logger.warn(" can not load directly connected {} due to ", host, e); } } } @@ -248,10 +265,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust logger.debug("create forwarding ClusteredAgentAttache for {}", host); long id = host.getId(); final AgentAttache attache = new ClusteredAgentAttache(this, id, host.getUuid(), host.getName()); - AgentAttache old = null; + AgentAttache old; synchronized (_agents) { - old = _agents.get(id); - _agents.put(id, attache); + old = 
_agents.get(host.getId()); + _agents.put(host.getId(), attache); } if (old != null) { logger.debug("Remove stale agent attache from current management server"); @@ -265,7 +282,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust logger.debug("create ClusteredAgentAttache for {}", host); final AgentAttache attache = new ClusteredAgentAttache(this, host.getId(), host.getUuid(), host.getName(), link, host.isInMaintenanceStates()); link.attach(attache); - AgentAttache old = null; + AgentAttache old; synchronized (_agents) { old = _agents.get(host.getId()); _agents.put(host.getId(), attache); @@ -280,7 +297,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) { logger.debug("Create ClusteredDirectAgentAttache for {}.", host); final DirectAgentAttache attache = new ClusteredDirectAgentAttache(this, host.getId(), host.getUuid(), host.getName(), _nodeId, resource, host.isInMaintenanceStates()); - AgentAttache old = null; + AgentAttache old; synchronized (_agents) { old = _agents.get(host.getId()); _agents.put(host.getId(), attache); @@ -399,12 +416,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust public boolean routeToPeer(final String peer, final byte[] bytes) { int i = 0; SocketChannel ch = null; - SSLEngine sslEngine = null; + SSLEngine sslEngine; while (i++ < 5) { ch = connectToPeer(peer, ch); if (ch == null) { try { - logD(bytes, "Unable to route to peer: " + Request.parse(bytes).toString()); + logD(bytes, "Unable to route to peer: " + Request.parse(bytes)); } catch (ClassNotFoundException | UnsupportedVersionException e) { // Request.parse thrown exception when we try to log it, log as much as we can logD(bytes, "Unable to route to peer, and Request.parse further caught exception" + e.getMessage()); @@ -422,7 +439,7 @@ public class ClusteredAgentManagerImpl extends 
AgentManagerImpl implements Clust return true; } catch (final IOException e) { try { - logI(bytes, "Unable to route to peer: " + Request.parse(bytes).toString() + " due to " + e.getMessage()); + logI(bytes, "Unable to route to peer: " + Request.parse(bytes) + " due to " + e.getMessage()); } catch (ClassNotFoundException | UnsupportedVersionException ex) { // Request.parse thrown exception when we try to log it, log as much as we can logI(bytes, "Unable to route to peer due to" + e.getMessage() + ". Also caught exception when parsing request: " + ex.getMessage()); @@ -465,7 +482,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust public SocketChannel connectToPeer(final String peerName, final SocketChannel prevCh) { synchronized (_peers) { final SocketChannel ch = _peers.get(peerName); - SSLEngine sslEngine = null; + SSLEngine sslEngine; if (prevCh != null) { try { prevCh.close(); @@ -550,13 +567,13 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust AgentAttache agent = findAttache(hostId); if (agent == null || !agent.forForward()) { if (isHostOwnerSwitched(host)) { - logger.debug("Host {} has switched to another management server, need to update agent map with a forwarding agent attache", host); + logger.debug("{} has switched to another management server, need to update agent map with a forwarding agent attache", host); agent = createAttache(host); } } if (agent == null) { final AgentUnavailableException ex = new AgentUnavailableException("Host with specified id is not in the right state: " + host.getStatus(), hostId); - ex.addProxyObject(_entityMgr.findById(Host.class, hostId).getUuid()); + ex.addProxyObject(host.getUuid()); throw ex; } @@ -585,7 +602,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } @Override - public void startDirectlyConnectedHosts() { + public void startDirectlyConnectedHosts(final boolean forRebalance) { // override and let it be dummy 
for purpose, we will scan and load direct agents periodically. // We may also pickup agents that have been left over from other crashed management server } @@ -598,9 +615,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override protected void doTask(final Task task) throws TaskExecutionException { - final TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); - try { - if (task.getType() != Task.Type.DATA) { + try (TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB)) { + if (task.getType() != Type.DATA) { super.doTask(task); return; } @@ -627,7 +643,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } final Request req = Request.parse(data); final Command[] cmds = req.getCommands(); - final CancelCommand cancel = (CancelCommand)cmds[0]; + final CancelCommand cancel = (CancelCommand) cmds[0]; logD(data, "Cancel request received"); agent.cancel(cancel.getSequence()); final Long current = agent._currentSequence; @@ -651,10 +667,9 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust // to deserialize this and send it through the agent attache. 
final Request req = Request.parse(data); agent.send(req, null); - return; } else { if (agent instanceof Routable) { - final Routable cluster = (Routable)agent; + final Routable cluster = (Routable) agent; cluster.routeToAgent(data); } else { agent.send(Request.parse(data)); @@ -671,13 +686,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust if (mgmtId != -1 && mgmtId != _nodeId) { routeToPeer(Long.toString(mgmtId), data); if (Request.requiresSequentialExecution(data)) { - final AgentAttache attache = (AgentAttache)link.attachment(); + final AgentAttache attache = (AgentAttache) link.attachment(); if (attache != null) { attache.sendNext(Request.getSequence(data)); } - logD(data, "No attache to process " + Request.parse(data).toString()); + logD(data, "No attache to process " + Request.parse(data)); } - return; } else { if (Request.isRequest(data)) { super.doTask(task); @@ -693,7 +707,6 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust logger.info("SeqA {}-{}: Response is not processed: {}", attache.getId(), response.getSequence(), response.toString()); } } - return; } } } catch (final ClassNotFoundException e) { @@ -704,8 +717,6 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust final String message = String.format("UnsupportedVersionException occurred when executing tasks! 
Error '%s'", e.getMessage()); logger.error(message); throw new TaskExecutionException(message, e); - } finally { - txn.close(); } } } @@ -742,12 +753,17 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override public boolean executeRebalanceRequest(final long agentId, final long currentOwnerId, final long futureOwnerId, final Event event) throws AgentUnavailableException, OperationTimedoutException { + return executeRebalanceRequest(agentId, currentOwnerId, futureOwnerId, event, false); + } + + @Override + public boolean executeRebalanceRequest(final long agentId, final long currentOwnerId, final long futureOwnerId, final Event event, boolean isConnectionTransfer) throws AgentUnavailableException, OperationTimedoutException { boolean result = false; if (event == Event.RequestAgentRebalance) { - return setToWaitForRebalance(agentId, currentOwnerId, futureOwnerId); + return setToWaitForRebalance(agentId); } else if (event == Event.StartAgentRebalance) { try { - result = rebalanceHost(agentId, currentOwnerId, futureOwnerId); + result = rebalanceHost(agentId, currentOwnerId, futureOwnerId, isConnectionTransfer); } catch (final Exception e) { logger.warn("Unable to rebalance host id={} ({})", agentId, findAttache(agentId), e); } @@ -799,7 +815,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing); final List allManagedAgents = sc.list(); - int avLoad = 0; + int avLoad; if (!allManagedAgents.isEmpty() && !allMS.isEmpty()) { avLoad = allManagedAgents.size() / allMS.size(); @@ -817,7 +833,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust for (final ManagementServerHostVO node : allMS) { if (node.getMsid() != _nodeId) { - List hostsToRebalance = new ArrayList(); + List hostsToRebalance = new ArrayList<>(); for (final AgentLoadBalancerPlanner lbPlanner : _lbPlanners) { hostsToRebalance = 
lbPlanner.getHostsToRebalance(node, avLoad); if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) { @@ -843,7 +859,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust HostTransferMapVO transfer = null; try { transfer = _hostTransferDao.startAgentTransfering(hostId, node.getMsid(), _nodeId); - final Answer[] answer = sendRebalanceCommand(node.getMsid(), hostId, node.getMsid(), _nodeId, Event.RequestAgentRebalance); + final Answer[] answer = sendRebalanceCommand(node.getMsid(), hostId, node.getMsid(), _nodeId); if (answer == null) { logger.warn("Failed to get host {} from management server {}", host, node); result = false; @@ -870,8 +886,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } } - private Answer[] sendRebalanceCommand(final long peer, final long agentId, final long currentOwnerId, final long futureOwnerId, final Event event) { - final TransferAgentCommand transfer = new TransferAgentCommand(agentId, currentOwnerId, futureOwnerId, event); + private Answer[] sendRebalanceCommand(final long peer, final long agentId, final long currentOwnerId, final long futureOwnerId) { + return sendRebalanceCommand(peer, agentId, currentOwnerId, futureOwnerId, Event.RequestAgentRebalance, false); + } + + private Answer[] sendRebalanceCommand(final long peer, final long agentId, final long currentOwnerId, final long futureOwnerId, final Event event, final boolean isConnectionTransfer) { + final TransferAgentCommand transfer = new TransferAgentCommand(agentId, currentOwnerId, futureOwnerId, event, isConnectionTransfer); final Commands commands = new Commands(Command.OnError.Stop); commands.addCommand(transfer); @@ -882,8 +902,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust final String peerName = Long.toString(peer); final String cmdStr = _gson.toJson(cmds); final String ansStr = _clusterMgr.execute(peerName, agentId, cmdStr, true); - final Answer[] answers = 
_gson.fromJson(ansStr, Answer[].class); - return answers; + return _gson.fromJson(ansStr, Answer[].class); } catch (final Exception e) { logger.warn("Caught exception while talking to {}", currentOwnerId, e); return null; @@ -932,7 +951,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust try { logger.trace("Clustered agent transfer scan check, management server id: {}", _nodeId); synchronized (_agentToTransferIds) { - if (_agentToTransferIds.size() > 0) { + if (!_agentToTransferIds.isEmpty()) { logger.debug("Found {} agents to transfer", _agentToTransferIds.size()); // for (Long hostId : _agentToTransferIds) { for (final Iterator iterator = _agentToTransferIds.iterator(); iterator.hasNext();) { @@ -956,7 +975,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } if (transferMap.getInitialOwner() != _nodeId || attache == null || attache.forForward()) { - logger.debug(String.format("Management server %d doesn't own host id=%d (%s) any more, skipping rebalance for the host", _nodeId, hostId, attache)); + logger.debug("Management server {} doesn't own host id={} ({}) any more, skipping rebalance for the host", _nodeId, hostId, attache); iterator.remove(); _hostTransferDao.completeAgentTransfer(hostId); continue; @@ -976,9 +995,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust _executor.execute(new RebalanceTask(hostId, transferMap.getInitialOwner(), transferMap.getFutureOwner())); } catch (final RejectedExecutionException ex) { logger.warn("Failed to submit rebalance task for host id={} ({}); postponing the execution", hostId, attache); - continue; } - } else { logger.debug("Agent {} ({}) can't be transferred yet as its request queue size is {} and listener queue size is {}", hostId, attache, attache.getQueueSize(), attache.getNonRecurringListenersSize()); @@ -988,7 +1005,6 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust 
logger.trace("Found no agents to be transferred by the management server {}", _nodeId); } } - } catch (final Throwable e) { logger.error("Problem with the clustered agent transfer scan check!", e); } @@ -996,7 +1012,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust }; } - private boolean setToWaitForRebalance(final long hostId, final long currentOwnerId, final long futureOwnerId) { + private boolean setToWaitForRebalance(final long hostId) { logger.debug("Adding agent {} ({}) to the list of agents to transfer", hostId, findAttache(hostId)); synchronized (_agentToTransferIds) { return _agentToTransferIds.add(hostId); @@ -1004,7 +1020,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } protected boolean rebalanceHost(final long hostId, final long currentOwnerId, final long futureOwnerId) throws AgentUnavailableException { + return rebalanceHost(hostId, currentOwnerId, futureOwnerId, false); + } + protected boolean rebalanceHost(final long hostId, final long currentOwnerId, final long futureOwnerId, final boolean isConnectionTransfer) throws AgentUnavailableException { boolean result = true; if (currentOwnerId == _nodeId) { if (!startRebalance(hostId)) { @@ -1013,7 +1032,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust return false; } try { - final Answer[] answer = sendRebalanceCommand(futureOwnerId, hostId, currentOwnerId, futureOwnerId, Event.StartAgentRebalance); + final Answer[] answer = sendRebalanceCommand(futureOwnerId, hostId, currentOwnerId, futureOwnerId, Event.StartAgentRebalance, isConnectionTransfer); if (answer == null || !answer[0].getResult()) { result = false; } @@ -1034,7 +1053,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } else if (futureOwnerId == _nodeId) { final HostVO host = _hostDao.findById(hostId); try { - logger.debug("Disconnecting host {} as a part of rebalance process without 
notification", host); + logger.debug("Disconnecting {} as a part of rebalance process without notification", host); final AgentAttache attache = findAttache(hostId); if (attache != null) { @@ -1043,7 +1062,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust if (result) { logger.debug("Loading directly connected host {} to the management server {} as a part of rebalance process", host, _nodeId); - result = loadDirectlyConnectedHost(host, true); + result = loadDirectlyConnectedHost(host, true, isConnectionTransfer); } else { logger.warn("Failed to disconnect {} as a part of rebalance process without notification", host); } @@ -1054,9 +1073,9 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } if (result) { - logger.debug("Successfully loaded directly connected host {} to the management server {} a part of rebalance process without notification", host, _nodeId); + logger.debug("Successfully loaded directly connected {} to the management server {} a part of rebalance process without notification", host, _nodeId); } else { - logger.warn("Failed to load directly connected host {} to the management server {} a part of rebalance process without notification", host, _nodeId); + logger.warn("Failed to load directly connected {} to the management server {} a part of rebalance process without notification", host, _nodeId); } } @@ -1065,12 +1084,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust protected void finishRebalance(final long hostId, final long futureOwnerId, final Event event) { - final boolean success = event == Event.RebalanceCompleted ? 
true : false; + final boolean success = event == Event.RebalanceCompleted; final AgentAttache attache = findAttache(hostId); logger.debug("Finishing rebalancing for the agent {} ({}) with event {}", hostId, attache, event); - if (attache == null || !(attache instanceof ClusteredAgentAttache)) { + if (!(attache instanceof ClusteredAgentAttache)) { logger.debug("Unable to find forward attache for the host id={} assuming that the agent disconnected already", hostId); _hostTransferDao.completeAgentTransfer(hostId); return; @@ -1166,9 +1185,9 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } protected class RebalanceTask extends ManagedContextRunnable { - Long hostId = null; - Long currentOwnerId = null; - Long futureOwnerId = null; + Long hostId; + Long currentOwnerId; + Long futureOwnerId; public RebalanceTask(final long hostId, final long currentOwnerId, final long futureOwnerId) { this.hostId = hostId; @@ -1237,7 +1256,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust final ChangeAgentCommand cmd = (ChangeAgentCommand)cmds[0]; logger.debug("Intercepting command for agent change: agent {} event: {}", cmd.getAgentId(), cmd.getEvent()); - boolean result = false; + boolean result; try { result = executeAgentUserRequest(cmd.getAgentId(), cmd.getEvent()); logger.debug("Result is {}", result); @@ -1253,10 +1272,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } else if (cmds.length == 1 && cmds[0] instanceof TransferAgentCommand) { final TransferAgentCommand cmd = (TransferAgentCommand)cmds[0]; - logger.debug("Intercepting command for agent rebalancing: agent {} event: {}", cmd.getAgentId(), cmd.getEvent()); - boolean result = false; + logger.debug("Intercepting command for agent rebalancing: agent: {}, event: {}, connection transfer: {}", cmd.getAgentId(), cmd.getEvent(), cmd.isConnectionTransfer()); + boolean result; try { - result = 
rebalanceAgent(cmd.getAgentId(), cmd.getEvent(), cmd.getCurrentOwner(), cmd.getFutureOwner()); + result = rebalanceAgent(cmd.getAgentId(), cmd.getEvent(), cmd.getCurrentOwner(), cmd.getFutureOwner(), cmd.isConnectionTransfer()); logger.debug("Result is {}", result); } catch (final AgentUnavailableException e) { @@ -1274,7 +1293,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust logger.debug("Intercepting command to propagate event {} for host {} ({})", () -> cmd.getEvent().name(), cmd::getHostId, () -> _hostDao.findById(cmd.getHostId())); - boolean result = false; + boolean result; try { result = _resourceMgr.executeUserRequest(cmd.getHostId(), cmd.getEvent()); logger.debug("Result is {}", result); @@ -1320,10 +1339,28 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } private String handleShutdownManagementServerHostCommand(BaseShutdownManagementServerHostCommand cmd) { - if (cmd instanceof PrepareForShutdownManagementServerHostCommand) { - logger.debug("Received BaseShutdownManagementServerHostCommand - preparing to shut down"); + if (cmd instanceof PrepareForMaintenanceManagementServerHostCommand) { + logger.debug("Received PrepareForMaintenanceManagementServerHostCommand - preparing for maintenance"); try { - shutdownManager.prepareForShutdown(); + managementServerMaintenanceManager.prepareForMaintenance(((PrepareForMaintenanceManagementServerHostCommand) cmd).getLbAlgorithm()); + return "Successfully prepared for maintenance"; + } catch(CloudRuntimeException e) { + return e.getMessage(); + } + } + if (cmd instanceof CancelMaintenanceManagementServerHostCommand) { + logger.debug("Received CancelMaintenanceManagementServerHostCommand - cancelling maintenance"); + try { + managementServerMaintenanceManager.cancelMaintenance(); + return "Successfully cancelled maintenance"; + } catch(CloudRuntimeException e) { + return e.getMessage(); + } + } + if (cmd instanceof 
PrepareForShutdownManagementServerHostCommand) { + logger.debug("Received PrepareForShutdownManagementServerHostCommand - preparing to shut down"); + try { + managementServerMaintenanceManager.prepareForShutdown(); return "Successfully prepared for shutdown"; } catch(CloudRuntimeException e) { return e.getMessage(); @@ -1332,7 +1369,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust if (cmd instanceof TriggerShutdownManagementServerHostCommand) { logger.debug("Received TriggerShutdownManagementServerHostCommand - triggering a shut down"); try { - shutdownManager.triggerShutdown(); + managementServerMaintenanceManager.triggerShutdown(); return "Successfully triggered shutdown"; } catch(CloudRuntimeException e) { return e.getMessage(); @@ -1341,8 +1378,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust if (cmd instanceof CancelShutdownManagementServerHostCommand) { logger.debug("Received CancelShutdownManagementServerHostCommand - cancelling shut down"); try { - shutdownManager.cancelShutdown(); - return "Successfully prepared for shutdown"; + managementServerMaintenanceManager.cancelShutdown(); + return "Successfully cancelled shutdown"; } catch(CloudRuntimeException e) { return e.getMessage(); } @@ -1351,6 +1388,127 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } } + @Override + public boolean transferDirectAgentsFromMS(String fromMsUuid, long fromMsId, long timeoutDurationInMs) { + if (timeoutDurationInMs <= 0) { + logger.debug("Not transferring direct agents from management server node {} (id: {}) to other nodes, invalid timeout duration", fromMsId, fromMsUuid); + return false; + } + + long transferStartTime = System.currentTimeMillis(); + if (CollectionUtils.isEmpty(getDirectAgentHosts(fromMsId))) { + logger.info("No direct agent hosts available on management server node {} (id: {}), to transfer", fromMsId, fromMsUuid); + return true; + } + + List msHosts 
= getUpMsHostsExcludingMs(fromMsId); + if (msHosts.isEmpty()) { + logger.warn("No management server nodes available to transfer agents from management server node {} (id: {})", fromMsId, fromMsUuid); + return false; + } + + logger.debug("Transferring direct agents from management server node {} (id: {}) to other nodes", fromMsId, fromMsUuid); + int agentTransferFailedCount = 0; + List dataCenterList = dcDao.listAll(); + for (DataCenterVO dc : dataCenterList) { + List directAgentHostsInDc = getDirectAgentHostsInDc(fromMsId, dc.getId()); + if (CollectionUtils.isEmpty(directAgentHostsInDc)) { + continue; + } + logger.debug("Transferring {} direct agents from management server node {} (id: {}) of zone {}", directAgentHostsInDc.size(), fromMsId, fromMsUuid, dc); + for (HostVO host : directAgentHostsInDc) { + long transferElapsedTimeInMs = System.currentTimeMillis() - transferStartTime; + if (transferElapsedTimeInMs >= timeoutDurationInMs) { + logger.debug("Stop transferring remaining direct agents from management server node {} (id: {}), timed out", fromMsId, fromMsUuid); + return false; + } + + try { + if (_mshostCounter >= msHosts.size()) { + _mshostCounter = 0; + } + ManagementServerHostVO msHost = msHosts.get(_mshostCounter % msHosts.size()); + _mshostCounter++; + + _hostTransferDao.startAgentTransfering(host.getId(), fromMsId, msHost.getMsid()); + if (!rebalanceAgent(host.getId(), Event.StartAgentRebalance, fromMsId, msHost.getMsid(), true)) { + agentTransferFailedCount++; + } else { + updateLastManagementServer(host.getId(), fromMsId); + } + } catch (Exception e) { + logger.warn("Failed to transfer direct agent of the host {} from management server node {} (id: {}), due to {}", host, fromMsId, fromMsUuid, e.getMessage()); + } + } + } + + return (agentTransferFailedCount == 0); + } + + private List getDirectAgentHosts(long msId) { + List directAgentHosts = new ArrayList<>(); + List hosts = _hostDao.listHostsByMs(msId); + for (HostVO host : hosts) { + AgentAttache 
agent = findAttache(host.getId()); + if (agent instanceof DirectAgentAttache) { + directAgentHosts.add(host); + } + } + + return directAgentHosts; + } + + private List getDirectAgentHostsInDc(long msId, long dcId) { + List directAgentHosts = new ArrayList<>(); + List hosts = _hostDao.listHostsByMsAndDc(msId, dcId); + for (HostVO host : hosts) { + AgentAttache agent = findAttache(host.getId()); + if (agent instanceof DirectAgentAttache) { + directAgentHosts.add(host); + } + } + + return directAgentHosts; + } + + private List getUpMsHostsExcludingMs(long avoidMsId) { + final List msHosts = _mshostDao.listBy(ManagementServerHost.State.Up); + msHosts.removeIf(ms -> ms.getMsid() == avoidMsId || _mshostPeerDao.findByPeerMsAndState(ms.getId(), ManagementServerHost.State.Up) == null); + + return msHosts; + } + + private void updateLastManagementServer(long hostId, long msId) { + HostVO hostVO = _hostDao.findById(hostId); + if (hostVO != null) { + hostVO.setLastManagementServerId(msId); + _hostDao.update(hostId, hostVO); + } + } + + @Override + public void onManagementServerMaintenance() { + logger.debug("Management server maintenance enabled"); + s_transferExecutor.shutdownNow(); + cleanupTransferMap(_nodeId); + _agentLbHappened = false; + super.onManagementServerMaintenance(); + } + + @Override + public void onManagementServerCancelMaintenance() { + logger.debug("Management server maintenance disabled"); + super.onManagementServerCancelMaintenance(); + if (isAgentRebalanceEnabled()) { + cleanupTransferMap(_nodeId); + if (s_transferExecutor.isShutdown()) { + s_transferExecutor = Executors.newScheduledThreadPool(2, new NamedThreadFactory("Cluster-AgentRebalancingExecutor")); + s_transferExecutor.scheduleAtFixedRate(getAgentRebalanceScanTask(), 60000, 60000, TimeUnit.MILLISECONDS); + s_transferExecutor.scheduleAtFixedRate(getTransferScanTask(), 60000, ClusteredAgentRebalanceService.DEFAULT_TRANSFER_CHECK_INTERVAL, TimeUnit.MILLISECONDS); + } + } + } + public boolean 
executeAgentUserRequest(final long agentId, final Event event) throws AgentUnavailableException { return executeUserRequest(agentId, event); } @@ -1359,6 +1517,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust return executeRebalanceRequest(agentId, currentOwnerId, futureOwnerId, event); } + public boolean rebalanceAgent(final long agentId, final Event event, final long currentOwnerId, final long futureOwnerId, boolean isConnectionTransfer) throws AgentUnavailableException, OperationTimedoutException { + return executeRebalanceRequest(agentId, currentOwnerId, futureOwnerId, event, isConnectionTransfer); + } + public boolean isAgentRebalanceEnabled() { return EnableLB.value(); } @@ -1413,8 +1575,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust public ConfigKey[] getConfigKeys() { final ConfigKey[] keys = super.getConfigKeys(); - final List> keysLst = new ArrayList>(); - keysLst.addAll(Arrays.asList(keys)); + final List> keysLst = new ArrayList<>(Arrays.asList(keys)); keysLst.add(EnableLB); keysLst.add(ConnectedAgentThreshold); keysLst.add(LoadSize); diff --git a/engine/orchestration/src/main/java/com/cloud/cluster/ClusteredAgentRebalanceService.java b/engine/orchestration/src/main/java/com/cloud/cluster/ClusteredAgentRebalanceService.java index ed52eb1a241..524b1c3adb6 100644 --- a/engine/orchestration/src/main/java/com/cloud/cluster/ClusteredAgentRebalanceService.java +++ b/engine/orchestration/src/main/java/com/cloud/cluster/ClusteredAgentRebalanceService.java @@ -27,4 +27,5 @@ public interface ClusteredAgentRebalanceService { boolean executeRebalanceRequest(long agentId, long currentOwnerId, long futureOwnerId, Event event) throws AgentUnavailableException, OperationTimedoutException; + boolean executeRebalanceRequest(long agentId, long currentOwnerId, long futureOwnerId, Event event, boolean isConnectionTransfer) throws AgentUnavailableException, OperationTimedoutException; } diff --git 
a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index a8b0130bdbc..6d27b0efed3 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -85,6 +85,7 @@ import org.apache.cloudstack.resource.ResourceCleanupService; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.cloudstack.utils.cache.SingleCache; import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.cloudstack.vm.UnmanagedVMsManager; @@ -406,6 +407,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private DomainDao domainDao; @Inject ResourceCleanupService resourceCleanupService; + @Inject + VmWorkJobDao vmWorkJobDao; + + private SingleCache> vmIdsInProgressCache; VmWorkJobHandlerProxy _jobHandlerProxy = new VmWorkJobHandlerProxy(this); @@ -450,6 +455,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac Long.class, "systemvm.root.disk.size", "-1", "Size of root volume (in GB) of system VMs and virtual routers", true); + private boolean syncTransitioningVmPowerState; + ScheduledExecutorService _executor = null; private long _nodeId; @@ -816,6 +823,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac @Override public boolean start() { + vmIdsInProgressCache = new SingleCache<>(10, vmWorkJobDao::listVmIdsWithPendingJob); _executor.scheduleAtFixedRate(new CleanupTask(), 5, VmJobStateReportInterval.value(), TimeUnit.SECONDS); _executor.scheduleAtFixedRate(new TransitionTask(), 
VmOpCleanupInterval.value(), VmOpCleanupInterval.value(), TimeUnit.SECONDS); cancelWorkItems(_nodeId); @@ -843,6 +851,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac _messageBus.subscribe(VirtualMachineManager.Topics.VM_POWER_STATE, MessageDispatcher.getDispatcher(this)); + syncTransitioningVmPowerState = Boolean.TRUE.equals(VmSyncPowerStateTransitioning.value()); + return true; } @@ -3506,7 +3516,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (MIGRATE_VM_ACROSS_CLUSTERS.valueIn(host.getDataCenterId()) && (HypervisorType.VMware.equals(host.getHypervisorType()) || !checkIfVmHasClusterWideVolumes(vm.getId()))) { logger.info("Searching for hosts in the zone for vm migration"); - List clustersToExclude = _clusterDao.listAllClusters(host.getDataCenterId()); + List clustersToExclude = _clusterDao.listAllClusterIds(host.getDataCenterId()); List clusterList = _clusterDao.listByDcHyType(host.getDataCenterId(), host.getHypervisorType().toString()); for (ClusterVO cluster : clusterList) { clustersToExclude.remove(cluster.getId()); @@ -3800,7 +3810,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (ping.getHostVmStateReport() != null) { _syncMgr.processHostVmStatePingReport(agentId, ping.getHostVmStateReport(), ping.getOutOfBand()); } - scanStalledVMInTransitionStateOnUpHost(agentId); processed = true; } @@ -4757,7 +4766,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac VmOpLockStateRetry, VmOpWaitInterval, ExecuteInSequence, VmJobCheckInterval, VmJobTimeout, VmJobStateReportInterval, VmConfigDriveLabel, VmConfigDriveOnPrimaryPool, VmConfigDriveForceHostCacheUse, VmConfigDriveUseHostCacheOnUnsupportedPool, HaVmRestartHostUp, ResourceCountRunningVMsonly, AllowExposeHypervisorHostname, AllowExposeHypervisorHostnameAccountLevel, SystemVmRootDiskSize, - AllowExposeDomainInMetadata, MetadataCustomCloudName, 
VmMetadataManufacturer, VmMetadataProductName + AllowExposeDomainInMetadata, MetadataCustomCloudName, VmMetadataManufacturer, VmMetadataProductName, + VmSyncPowerStateTransitioning }; } @@ -4955,20 +4965,46 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } + /** + * Scans stalled VMs in transition states on an UP host and processes them accordingly. + * + *

This method is executed only when the {@code syncTransitioningVmPowerState} flag is enabled. It identifies + * VMs stuck in specific states (e.g., Starting, Stopping, Migrating) on a host that is UP, except for those + * in the Expunging state, which require special handling.

+ * + *

The following conditions are checked during the scan: + *

    + *
  • No pending {@code VmWork} job exists for the VM.
  • + *
  • The VM is associated with the given {@code hostId}, and the host is UP.
  • + *
+ *

+ * + *

When a host is UP, a state report for the VMs will typically be received. However, certain scenarios + * (e.g., out-of-band changes or behavior specific to hypervisors like XenServer or KVM) might result in + * missing reports, preventing the state-sync logic from running. To address this, the method scans VMs + * based on their last update timestamp. If a VM remains stalled without a status update while its host is UP, + * it is assumed to be powered off, which is generally a safe assumption.

+ * + * @param hostId the ID of the host to scan for stalled VMs in transition states. + */ private void scanStalledVMInTransitionStateOnUpHost(final long hostId) { - final long stallThresholdInMs = VmJobStateReportInterval.value() + (VmJobStateReportInterval.value() >> 1); - final Date cutTime = new Date(DateUtil.currentGMTTime().getTime() - stallThresholdInMs); - final List mostlikelyStoppedVMs = listStalledVMInTransitionStateOnUpHost(hostId, cutTime); - for (final Long vmId : mostlikelyStoppedVMs) { - final VMInstanceVO vm = _vmDao.findById(vmId); - assert vm != null; + if (!syncTransitioningVmPowerState) { + return; + } + if (!_hostDao.isHostUp(hostId)) { + return; + } + final long stallThresholdInMs = VmJobStateReportInterval.value() * 2; + final long cutTime = new Date(DateUtil.currentGMTTime().getTime() - stallThresholdInMs).getTime(); + final List hostTransitionVms = _vmDao.listByHostAndState(hostId, State.Starting, State.Stopping, State.Migrating); + + final List mostLikelyStoppedVMs = listStalledVMInTransitionStateOnUpHost(hostTransitionVms, cutTime); + for (final VMInstanceVO vm : mostLikelyStoppedVMs) { handlePowerOffReportWithNoPendingJobsOnVM(vm); } - final List vmsWithRecentReport = listVMInTransitionStateWithRecentReportOnUpHost(hostId, cutTime); - for (final Long vmId : vmsWithRecentReport) { - final VMInstanceVO vm = _vmDao.findById(vmId); - assert vm != null; + final List vmsWithRecentReport = listVMInTransitionStateWithRecentReportOnUpHost(hostTransitionVms, cutTime); + for (final VMInstanceVO vm : vmsWithRecentReport) { if (vm.getPowerState() == PowerState.PowerOn) { handlePowerOnReportWithNoPendingJobsOnVM(vm); } else { @@ -4977,6 +5013,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } + private void scanStalledVMInTransitionStateOnDisconnectedHosts() { final Date cutTime = new Date(DateUtil.currentGMTTime().getTime() - VmOpWaitInterval.value() * 1000); final List stuckAndUncontrollableVMs = 
listStalledVMInTransitionStateOnDisconnectedHosts(cutTime); @@ -4989,89 +5026,58 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } - private List listStalledVMInTransitionStateOnUpHost(final long hostId, final Date cutTime) { - final String sql = "SELECT i.* FROM vm_instance as i, host as h WHERE h.status = 'UP' " + - "AND h.id = ? AND i.power_state_update_time < ? AND i.host_id = h.id " + - "AND (i.state ='Starting' OR i.state='Stopping' OR i.state='Migrating') " + - "AND i.id NOT IN (SELECT w.vm_instance_id FROM vm_work_job AS w JOIN async_job AS j ON w.id = j.id WHERE j.job_status = ?)" + - "AND i.removed IS NULL"; - - final List l = new ArrayList<>(); - try (TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB)) { - String cutTimeStr = DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime); - - try { - PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql); - - pstmt.setLong(1, hostId); - pstmt.setString(2, cutTimeStr); - pstmt.setInt(3, JobInfo.Status.IN_PROGRESS.ordinal()); - final ResultSet rs = pstmt.executeQuery(); - while (rs.next()) { - l.add(rs.getLong(1)); - } - } catch (SQLException e) { - logger.error("Unable to execute SQL [{}] with params {\"h.id\": {}, \"i.power_state_update_time\": \"{}\"} due to [{}].", sql, hostId, cutTimeStr, e.getMessage(), e); - } + private List listStalledVMInTransitionStateOnUpHost( + final List transitioningVms, final long cutTime) { + if (CollectionUtils.isEmpty(transitioningVms)) { + return transitioningVms; } - return l; + List vmIdsInProgress = vmIdsInProgressCache.get(); + return transitioningVms.stream() + .filter(v -> v.getPowerStateUpdateTime().getTime() < cutTime && !vmIdsInProgress.contains(v.getId())) + .collect(Collectors.toList()); } - private List listVMInTransitionStateWithRecentReportOnUpHost(final long hostId, final Date cutTime) { - final String sql = "SELECT i.* FROM vm_instance as i, host as h WHERE h.status = 'UP' " + - "AND 
h.id = ? AND i.power_state_update_time > ? AND i.host_id = h.id " + - "AND (i.state ='Starting' OR i.state='Stopping' OR i.state='Migrating') " + - "AND i.id NOT IN (SELECT w.vm_instance_id FROM vm_work_job AS w JOIN async_job AS j ON w.id = j.id WHERE j.job_status = ?)" + - "AND i.removed IS NULL"; - - final List l = new ArrayList<>(); - try (TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB)) { - String cutTimeStr = DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime); - int jobStatusInProgress = JobInfo.Status.IN_PROGRESS.ordinal(); - - try { - PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql); - - pstmt.setLong(1, hostId); - pstmt.setString(2, cutTimeStr); - pstmt.setInt(3, jobStatusInProgress); - final ResultSet rs = pstmt.executeQuery(); - while (rs.next()) { - l.add(rs.getLong(1)); - } - } catch (final SQLException e) { - logger.error("Unable to execute SQL [{}] with params {\"h.id\": {}, \"i.power_state_update_time\": \"{}\", \"j.job_status\": {}} due to [{}].", sql, hostId, cutTimeStr, jobStatusInProgress, e.getMessage(), e); - } - return l; + private List listVMInTransitionStateWithRecentReportOnUpHost( + final List transitioningVms, final long cutTime) { + if (CollectionUtils.isEmpty(transitioningVms)) { + return transitioningVms; } + List vmIdsInProgress = vmIdsInProgressCache.get(); + return transitioningVms.stream() + .filter(v -> v.getPowerStateUpdateTime().getTime() > cutTime && !vmIdsInProgress.contains(v.getId())) + .collect(Collectors.toList()); } private List listStalledVMInTransitionStateOnDisconnectedHosts(final Date cutTime) { - final String sql = "SELECT i.* FROM vm_instance as i, host as h WHERE h.status != 'UP' " + - "AND i.power_state_update_time < ? 
AND i.host_id = h.id " + - "AND (i.state ='Starting' OR i.state='Stopping' OR i.state='Migrating') " + - "AND i.id NOT IN (SELECT w.vm_instance_id FROM vm_work_job AS w JOIN async_job AS j ON w.id = j.id WHERE j.job_status = ?)" + - "AND i.removed IS NULL"; + final String sql = "SELECT i.* " + + "FROM vm_instance AS i " + + "INNER JOIN host AS h ON i.host_id = h.id " + + "WHERE h.status != 'UP' " + + " AND i.power_state_update_time < ? " + + " AND i.state IN ('Starting', 'Stopping', 'Migrating') " + + " AND i.id NOT IN (SELECT vm_instance_id FROM vm_work_job AS w " + + " INNER JOIN async_job AS j ON w.id = j.id " + + " WHERE j.job_status = ?) " + + " AND i.removed IS NULL"; final List l = new ArrayList<>(); - try (TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB)) { - String cutTimeStr = DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime); - int jobStatusInProgress = JobInfo.Status.IN_PROGRESS.ordinal(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); + String cutTimeStr = DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime); + int jobStatusInProgress = JobInfo.Status.IN_PROGRESS.ordinal(); - try { - PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql); + try { + PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql); - pstmt.setString(1, cutTimeStr); - pstmt.setInt(2, jobStatusInProgress); - final ResultSet rs = pstmt.executeQuery(); - while (rs.next()) { - l.add(rs.getLong(1)); - } - } catch (final SQLException e) { - logger.error("Unable to execute SQL [{}] with params {\"i.power_state_update_time\": \"{}\", \"j.job_status\": {}} due to [{}].", sql, cutTimeStr, jobStatusInProgress, e.getMessage(), e); + pstmt.setString(1, cutTimeStr); + pstmt.setInt(2, jobStatusInProgress); + final ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + l.add(rs.getLong(1)); } - return l; + } catch (final SQLException e) { + logger.error("Unable to execute SQL [{}] with params 
{\"i.power_state_update_time\": \"{}\", \"j.job_status\": {}} due to [{}].", sql, cutTimeStr, jobStatusInProgress, e.getMessage(), e); } + return l; } public class VmStateSyncOutcome extends OutcomeImpl { @@ -5953,29 +5959,23 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } @Override - public HashMap getVirtualMachineStatistics(long hostId, String hostName, List vmIds) { + public HashMap getVirtualMachineStatistics(Host host, List vmIds) { HashMap vmStatsById = new HashMap<>(); if (CollectionUtils.isEmpty(vmIds)) { return vmStatsById; } - Map vmMap = new HashMap<>(); - for (Long vmId : vmIds) { - vmMap.put(vmId, _vmDao.findById(vmId)); - } - return getVirtualMachineStatistics(hostId, hostName, vmMap); + Map vmMap = _vmDao.getNameIdMapForVmIds(vmIds); + return getVirtualMachineStatistics(host, vmMap); } @Override - public HashMap getVirtualMachineStatistics(long hostId, String hostName, Map vmMap) { + public HashMap getVirtualMachineStatistics(Host host, Map vmInstanceNameIdMap) { HashMap vmStatsById = new HashMap<>(); - if (MapUtils.isEmpty(vmMap)) { + if (MapUtils.isEmpty(vmInstanceNameIdMap)) { return vmStatsById; } - Map vmNames = new HashMap<>(); - for (Map.Entry vmEntry : vmMap.entrySet()) { - vmNames.put(vmEntry.getValue().getInstanceName(), vmEntry.getKey()); - } - Answer answer = _agentMgr.easySend(hostId, new GetVmStatsCommand(new ArrayList<>(vmNames.keySet()), _hostDao.findById(hostId).getGuid(), hostName)); + Answer answer = _agentMgr.easySend(host.getId(), new GetVmStatsCommand( + new ArrayList<>(vmInstanceNameIdMap.keySet()), host.getGuid(), host.getName())); if (answer == null || !answer.getResult()) { logger.warn("Unable to obtain VM statistics."); return vmStatsById; @@ -5986,23 +5986,20 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac return vmStatsById; } for (Map.Entry entry : vmStatsByName.entrySet()) { - vmStatsById.put(vmNames.get(entry.getKey()), entry.getValue()); + 
vmStatsById.put(vmInstanceNameIdMap.get(entry.getKey()), entry.getValue()); } } return vmStatsById; } @Override - public HashMap> getVmDiskStatistics(long hostId, String hostName, Map vmMap) { + public HashMap> getVmDiskStatistics(Host host, Map vmInstanceNameIdMap) { HashMap> vmDiskStatsById = new HashMap<>(); - if (MapUtils.isEmpty(vmMap)) { + if (MapUtils.isEmpty(vmInstanceNameIdMap)) { return vmDiskStatsById; } - Map vmNames = new HashMap<>(); - for (Map.Entry vmEntry : vmMap.entrySet()) { - vmNames.put(vmEntry.getValue().getInstanceName(), vmEntry.getKey()); - } - Answer answer = _agentMgr.easySend(hostId, new GetVmDiskStatsCommand(new ArrayList<>(vmNames.keySet()), _hostDao.findById(hostId).getGuid(), hostName)); + Answer answer = _agentMgr.easySend(host.getId(), new GetVmDiskStatsCommand( + new ArrayList<>(vmInstanceNameIdMap.keySet()), host.getGuid(), host.getName())); if (answer == null || !answer.getResult()) { logger.warn("Unable to obtain VM disk statistics."); return vmDiskStatsById; @@ -6013,23 +6010,20 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac return vmDiskStatsById; } for (Map.Entry> entry: vmDiskStatsByName.entrySet()) { - vmDiskStatsById.put(vmNames.get(entry.getKey()), entry.getValue()); + vmDiskStatsById.put(vmInstanceNameIdMap.get(entry.getKey()), entry.getValue()); } } return vmDiskStatsById; } @Override - public HashMap> getVmNetworkStatistics(long hostId, String hostName, Map vmMap) { + public HashMap> getVmNetworkStatistics(Host host, Map vmInstanceNameIdMap) { HashMap> vmNetworkStatsById = new HashMap<>(); - if (MapUtils.isEmpty(vmMap)) { + if (MapUtils.isEmpty(vmInstanceNameIdMap)) { return vmNetworkStatsById; } - Map vmNames = new HashMap<>(); - for (Map.Entry vmEntry : vmMap.entrySet()) { - vmNames.put(vmEntry.getValue().getInstanceName(), vmEntry.getKey()); - } - Answer answer = _agentMgr.easySend(hostId, new GetVmNetworkStatsCommand(new ArrayList<>(vmNames.keySet()), 
_hostDao.findById(hostId).getGuid(), hostName)); + Answer answer = _agentMgr.easySend(host.getId(), new GetVmNetworkStatsCommand( + new ArrayList<>(vmInstanceNameIdMap.keySet()), host.getGuid(), host.getName())); if (answer == null || !answer.getResult()) { logger.warn("Unable to obtain VM network statistics."); return vmNetworkStatsById; @@ -6040,7 +6034,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac return vmNetworkStatsById; } for (Map.Entry> entry: vmNetworkStatsByName.entrySet()) { - vmNetworkStatsById.put(vmNames.get(entry.getKey()), entry.getValue()); + vmNetworkStatsById.put(vmInstanceNameIdMap.get(entry.getKey()), entry.getValue()); } } return vmNetworkStatsById; diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java index 94dddfdf18a..4b344ac4299 100644 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java @@ -16,27 +16,29 @@ // under the License. 
package com.cloud.vm; -import java.text.SimpleDateFormat; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; import javax.inject.Inject; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.host.dao.HostDao; -import com.cloud.utils.Pair; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.PublishScope; -import org.apache.logging.log4j.Logger; +import org.apache.cloudstack.utils.cache.LazyCache; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.collections.MapUtils; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import com.cloud.agent.api.HostVmStateReportEntry; import com.cloud.configuration.ManagementServiceConfiguration; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; import com.cloud.utils.DateUtil; -import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.dao.VMInstanceDao; public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStateSync { @@ -47,7 +49,12 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat @Inject HostDao hostDao; @Inject ManagementServiceConfiguration mgmtServiceConf; + private LazyCache vmCache; + private LazyCache hostCache; + public VirtualMachinePowerStateSyncImpl() { + vmCache = new LazyCache<>(16, 10, this::getVmFromId); + hostCache = new LazyCache<>(16, 10, this::getHostFromId); } @Override @@ -58,130 +65,141 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat @Override public void processHostVmStateReport(long hostId, Map report) { - HostVO host = hostDao.findById(hostId); - logger.debug("Process host VM state report. 
host: {}", host); - - Map> translatedInfo = convertVmStateReport(report); - processReport(host, translatedInfo, false); + logger.debug("Process host VM state report. host: {}", hostCache.get(hostId)); + Map translatedInfo = convertVmStateReport(report); + processReport(hostId, translatedInfo, false); } @Override public void processHostVmStatePingReport(long hostId, Map report, boolean force) { - HostVO host = hostDao.findById(hostId); - logger.debug("Process host VM state report from ping process. host: {}", host); - - Map> translatedInfo = convertVmStateReport(report); - processReport(host, translatedInfo, force); + logger.debug("Process host VM state report from ping process. host: {}", hostCache.get(hostId)); + Map translatedInfo = convertVmStateReport(report); + processReport(hostId, translatedInfo, force); } - private void processReport(HostVO host, Map> translatedInfo, boolean force) { - - logger.debug("Process VM state report. host: {}, number of records in report: {}.", host, translatedInfo.size()); - - for (Map.Entry> entry : translatedInfo.entrySet()) { - - logger.debug("VM state report. host: {}, vm: {}, power state: {}", host, entry.getValue().second(), entry.getValue().first()); - - if (_instanceDao.updatePowerState(entry.getKey(), host.getId(), entry.getValue().first(), DateUtil.currentGMTTime())) { - logger.debug("VM state report is updated. host: {}, vm: {}, power state: {}", host, entry.getValue().second(), entry.getValue().first()); - - _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, entry.getKey()); - } else { - logger.trace("VM power state does not change, skip DB writing. 
vm: {}", entry.getValue().second()); - } + private void updateAndPublishVmPowerStates(long hostId, Map instancePowerStates, + Date updateTime) { + if (instancePowerStates.isEmpty()) { + return; } + Set vmIds = instancePowerStates.keySet(); + Map notUpdated = _instanceDao.updatePowerState(instancePowerStates, hostId, + updateTime); + if (notUpdated.size() > vmIds.size()) { + return; + } + for (Long vmId : vmIds) { + if (!notUpdated.isEmpty() && !notUpdated.containsKey(vmId)) { + logger.debug("VM state report is updated. {}, {}, power state: {}", + () -> hostCache.get(hostId), () -> vmCache.get(vmId), () -> instancePowerStates.get(vmId)); + _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, + PublishScope.GLOBAL, vmId); + continue; + } + logger.trace("VM power state does not change, skip DB writing. {}", () -> vmCache.get(vmId)); + } + } + private List filterOutdatedFromMissingVmReport(List vmsThatAreMissingReport) { + List outdatedVms = vmsThatAreMissingReport.stream() + .filter(v -> !_instanceDao.isPowerStateUpToDate(v)) + .map(VMInstanceVO::getId) + .collect(Collectors.toList()); + if (CollectionUtils.isEmpty(outdatedVms)) { + return vmsThatAreMissingReport; + } + _instanceDao.resetVmPowerStateTracking(outdatedVms); + return vmsThatAreMissingReport.stream() + .filter(v -> !outdatedVms.contains(v.getId())) + .collect(Collectors.toList()); + } + + private void processMissingVmReport(long hostId, Set vmIds, boolean force) { // any state outdates should be checked against the time before this list was retrieved Date startTime = DateUtil.currentGMTTime(); // for all running/stopping VMs, we provide monitoring of missing report - List vmsThatAreMissingReport = _instanceDao.findByHostInStates(host.getId(), VirtualMachine.State.Running, - VirtualMachine.State.Stopping, VirtualMachine.State.Starting); - java.util.Iterator it = vmsThatAreMissingReport.iterator(); - while (it.hasNext()) { - VMInstanceVO instance = it.next(); - if 
(translatedInfo.get(instance.getId()) != null) - it.remove(); + List vmsThatAreMissingReport = _instanceDao.findByHostInStatesExcluding(hostId, vmIds, + VirtualMachine.State.Running, VirtualMachine.State.Stopping, VirtualMachine.State.Starting); + // here we need to be wary of out of band migration as opposed to other, more unexpected state changes + if (vmsThatAreMissingReport.isEmpty()) { + return; + } + Date currentTime = DateUtil.currentGMTTime(); + logger.debug("Run missing VM report. current time: {}", currentTime.getTime()); + if (!force) { + vmsThatAreMissingReport = filterOutdatedFromMissingVmReport(vmsThatAreMissingReport); } - // here we need to be wary of out of band migration as opposed to other, more unexpected state changes - if (vmsThatAreMissingReport.size() > 0) { - Date currentTime = DateUtil.currentGMTTime(); - logger.debug("Run missing VM report for host {}. current time: {}", host, currentTime.getTime()); - - // 2 times of sync-update interval for graceful period - long milliSecondsGracefullPeriod = mgmtServiceConf.getPingInterval() * 2000L; - - for (VMInstanceVO instance : vmsThatAreMissingReport) { - - // Make sure powerState is up to date for missing VMs - try { - if (!force && !_instanceDao.isPowerStateUpToDate(instance.getId())) { - logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM: {}", instance); - _instanceDao.resetVmPowerStateTracking(instance.getId()); - continue; - } - } catch (CloudRuntimeException e) { - logger.warn("Checked for missing powerstate of a none existing vm {}", instance, e); - continue; - } - - Date vmStateUpdateTime = instance.getPowerStateUpdateTime(); + // 2 times of sync-update interval for graceful period + long milliSecondsGracefulPeriod = mgmtServiceConf.getPingInterval() * 2000L; + Map instancePowerStates = new HashMap<>(); + for (VMInstanceVO instance : vmsThatAreMissingReport) { + Date vmStateUpdateTime = instance.getPowerStateUpdateTime(); + if 
(vmStateUpdateTime == null) { + logger.warn("VM power state update time is null, falling back to update time for {}", instance); + vmStateUpdateTime = instance.getUpdateTime(); if (vmStateUpdateTime == null) { - logger.warn("VM power state update time is null, falling back to update time for vm: {}", instance); - vmStateUpdateTime = instance.getUpdateTime(); - if (vmStateUpdateTime == null) { - logger.warn("VM update time is null, falling back to creation time for vm: {}", instance); - vmStateUpdateTime = instance.getCreated(); - } - } - - String lastTime = new SimpleDateFormat("yyyy/MM/dd'T'HH:mm:ss.SSS'Z'").format(vmStateUpdateTime); - logger.debug("Detected missing VM. host: {}, vm: {}, power state: {}, last state update: {}", - host, instance, VirtualMachine.PowerState.PowerReportMissing, lastTime); - - long milliSecondsSinceLastStateUpdate = currentTime.getTime() - vmStateUpdateTime.getTime(); - - if (force || milliSecondsSinceLastStateUpdate > milliSecondsGracefullPeriod) { - logger.debug("vm: {} - time since last state update({}ms) has passed graceful period", instance, milliSecondsSinceLastStateUpdate); - - // this is were a race condition might have happened if we don't re-fetch the instance; - // between the startime of this job and the currentTime of this missing-branch - // an update might have occurred that we should not override in case of out of band migration - if (_instanceDao.updatePowerState(instance.getId(), host.getId(), VirtualMachine.PowerState.PowerReportMissing, startTime)) { - logger.debug("VM state report is updated. host: {}, vm: {}, power state: PowerReportMissing ", host, instance); - - _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, instance.getId()); - } else { - logger.debug("VM power state does not change, skip DB writing. 
vm: {}", instance); - } - } else { - logger.debug("vm: {} - time since last state update({} ms) has not passed graceful period yet", instance, milliSecondsSinceLastStateUpdate); + logger.warn("VM update time is null, falling back to creation time for {}", instance); + vmStateUpdateTime = instance.getCreated(); } } + logger.debug("Detected missing VM. host: {}, vm id: {}({}), power state: {}, last state update: {}", + hostId, + instance.getId(), + instance.getUuid(), + VirtualMachine.PowerState.PowerReportMissing, + DateUtil.getOutputString(vmStateUpdateTime)); + long milliSecondsSinceLastStateUpdate = currentTime.getTime() - vmStateUpdateTime.getTime(); + if (force || (milliSecondsSinceLastStateUpdate > milliSecondsGracefulPeriod)) { + logger.debug("vm id: {} - time since last state update({} ms) has passed graceful period", + instance.getId(), milliSecondsSinceLastStateUpdate); + // this is where a race condition might have happened if we don't re-fetch the instance; + // between the startime of this job and the currentTime of this missing-branch + // an update might have occurred that we should not override in case of out of band migration + instancePowerStates.put(instance.getId(), VirtualMachine.PowerState.PowerReportMissing); + } else { + logger.debug("vm id: {} - time since last state update({} ms) has not passed graceful period yet", + instance.getId(), milliSecondsSinceLastStateUpdate); + } } - - logger.debug("Done with process of VM state report. host: {}", host); + updateAndPublishVmPowerStates(hostId, instancePowerStates, startTime); } - public Map> convertVmStateReport(Map states) { - final HashMap> map = new HashMap<>(); - if (states == null) { + private void processReport(long hostId, Map translatedInfo, boolean force) { + logger.debug("Process VM state report. {}, number of records in report: {}. 
VMs: [{}]", + () -> hostCache.get(hostId), + translatedInfo::size, + () -> translatedInfo.entrySet().stream().map(entry -> entry.getKey() + ":" + entry.getValue()) + .collect(Collectors.joining(", ")) + "]"); + updateAndPublishVmPowerStates(hostId, translatedInfo, DateUtil.currentGMTTime()); + + processMissingVmReport(hostId, translatedInfo.keySet(), force); + + logger.debug("Done with process of VM state report. host: {}", () -> hostCache.get(hostId)); + } + + public Map convertVmStateReport(Map states) { + final HashMap map = new HashMap<>(); + if (MapUtils.isEmpty(states)) { return map; } - + Map nameIdMap = _instanceDao.getNameIdMapForVmInstanceNames(states.keySet()); for (Map.Entry entry : states.entrySet()) { - VMInstanceVO vm = findVM(entry.getKey()); - if (vm != null) { - map.put(vm.getId(), new Pair<>(entry.getValue().getState(), vm)); + Long id = nameIdMap.get(entry.getKey()); + if (id != null) { + map.put(id, entry.getValue().getState()); } else { logger.debug("Unable to find matched VM in CloudStack DB. 
name: {} powerstate: {}", entry.getKey(), entry.getValue()); } } - return map; } - private VMInstanceVO findVM(String vmName) { - return _instanceDao.findVMByInstanceName(vmName); + protected VMInstanceVO getVmFromId(long vmId) { + return _instanceDao.findById(vmId); + } + + protected HostVO getHostFromId(long hostId) { + return hostDao.findById(hostId); } } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java index 053d9ac218e..8ef2de3f74d 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java @@ -372,6 +372,9 @@ public class EngineHostVO implements EngineHost, Identity { @Column(name = "mgmt_server_id") private Long managementServerId; + @Column(name = "last_mgmt_server_id") + private Long lastManagementServerId; + @Column(name = "dom0_memory") private long dom0MinMemory; @@ -556,6 +559,10 @@ public class EngineHostVO implements EngineHost, Identity { this.managementServerId = managementServerId; } + public void setLastManagementServerId(Long lastManagementServerId) { + this.lastManagementServerId = lastManagementServerId; + } + @Override public long getLastPinged() { return lastPinged; @@ -625,6 +632,11 @@ public class EngineHostVO implements EngineHost, Identity { return managementServerId; } + @Override + public Long getLastManagementServerId() { + return lastManagementServerId; + } + @Override public Date getDisconnectedOn() { return disconnectedOn; diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java index 7efc29b02a6..b0081c6e685 100644 --- 
a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java @@ -4263,7 +4263,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra @Override public void processConnect(final Host host, final StartupCommand cmd, final boolean forRebalance) throws ConnectionException { - if (!(cmd instanceof StartupRoutingCommand)) { + if (!(cmd instanceof StartupRoutingCommand) || cmd.isConnectionTransferred()) { return; } final long hostId = host.getId(); @@ -4872,7 +4872,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[]{NetworkGcWait, NetworkGcInterval, NetworkLockTimeout, + return new ConfigKey[]{NetworkGcWait, NetworkGcInterval, NetworkLockTimeout, DeniedRoutes, GuestDomainSuffix, NetworkThrottlingRate, MinVRVersion, PromiscuousMode, MacAddressChanges, ForgedTransmits, MacLearning, RollingRestartEnabled, TUNGSTEN_ENABLED, NSX_ENABLED }; diff --git a/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDao.java b/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDao.java index 9616f31d0c5..1bb79ce417a 100644 --- a/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDao.java +++ b/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDao.java @@ -28,6 +28,8 @@ import com.cloud.utils.db.GenericDao; public interface CapacityDao extends GenericDao { CapacityVO findByHostIdType(Long hostId, short capacityType); + List listByHostIdTypes(Long hostId, List capacityTypes); + List listClustersInZoneOrPodByHostCapacities(long id, long vmId, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone); List listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType); diff --git 
a/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDaoImpl.java b/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDaoImpl.java index 3acae985af4..5e7eee4566c 100644 --- a/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDaoImpl.java @@ -671,6 +671,18 @@ public class CapacityDaoImpl extends GenericDaoBase implements return findOneBy(sc); } + @Override + public List listByHostIdTypes(Long hostId, List capacityTypes) { + SearchBuilder sb = createSearchBuilder(); + sb.and("hostId", sb.entity().getHostOrPoolId(), SearchCriteria.Op.EQ); + sb.and("type", sb.entity().getCapacityType(), SearchCriteria.Op.IN); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("hostId", hostId); + sc.setParameters("type", capacityTypes.toArray()); + return listBy(sc); + } + @Override public List listClustersInZoneOrPodByHostCapacities(long id, long vmId, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone) { TransactionLegacy txn = TransactionLegacy.currentTxn(); diff --git a/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDao.java b/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDao.java index 06c9c525504..5daab544b21 100644 --- a/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDao.java +++ b/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDao.java @@ -16,6 +16,7 @@ // under the License. 
package com.cloud.dc; +import java.util.Collection; import java.util.Map; import com.cloud.utils.db.GenericDao; @@ -29,6 +30,8 @@ public interface ClusterDetailsDao extends GenericDao { ClusterDetailsVO findDetail(long clusterId, String name); + Map findDetails(long clusterId, Collection names); + void deleteDetails(long clusterId); String getVmwareDcName(Long clusterId); diff --git a/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDaoImpl.java index 0e40f8475c1..a4f6acb9057 100644 --- a/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDaoImpl.java @@ -16,13 +16,16 @@ // under the License. package com.cloud.dc; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.ConfigKey.Scope; import org.apache.cloudstack.framework.config.ScopedConfigStorage; +import org.apache.commons.collections.CollectionUtils; import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.db.GenericDaoBase; @@ -82,6 +85,23 @@ public class ClusterDetailsDaoImpl extends GenericDaoBase findDetails(long clusterId, Collection names) { + if (CollectionUtils.isEmpty(names)) { + return new HashMap<>(); + } + SearchBuilder sb = createSearchBuilder(); + sb.and("clusterId", sb.entity().getClusterId(), SearchCriteria.Op.EQ); + sb.and("name", sb.entity().getName(), SearchCriteria.Op.IN); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("clusterId", clusterId); + sc.setParameters("name", names.toArray()); + List results = search(sc, null); + return results.stream() + .collect(Collectors.toMap(ClusterDetailsVO::getName, ClusterDetailsVO::getValue)); + } + @Override public void deleteDetails(long clusterId) { SearchCriteria sc = 
ClusterSearch.create(); diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java index 6ecfdaeb058..bf12abd5114 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java @@ -16,15 +16,15 @@ // under the License. package com.cloud.dc.dao; +import java.util.List; +import java.util.Map; +import java.util.Set; + import com.cloud.cpu.CPU; import com.cloud.dc.ClusterVO; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.utils.db.GenericDao; -import java.util.List; -import java.util.Map; -import java.util.Set; - public interface ClusterDao extends GenericDao { List listByPodId(long podId); @@ -36,7 +36,7 @@ public interface ClusterDao extends GenericDao { List getAvailableHypervisorInZone(Long zoneId); - Set getDistictAvailableHypervisorsAcrossClusters(); + Set getDistinctAvailableHypervisorsAcrossClusters(); List listByDcHyType(long dcId, String hyType); @@ -46,9 +46,13 @@ public interface ClusterDao extends GenericDao { List listClustersWithDisabledPods(long zoneId); + Integer countAllByDcId(long zoneId); + + Integer countAllManagedAndEnabledByDcId(long zoneId); + List listClustersByDcId(long zoneId); - List listAllClusters(Long zoneId); + List listAllClusterIds(Long zoneId); boolean getSupportsResigning(long clusterId); diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java index 9a56f0f2d94..af6b8397643 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java @@ -16,25 +16,6 @@ // under the License. 
package com.cloud.dc.dao; -import com.cloud.cpu.CPU; -import com.cloud.dc.ClusterDetailsDao; -import com.cloud.dc.ClusterDetailsVO; -import com.cloud.dc.ClusterVO; -import com.cloud.dc.HostPodVO; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.org.Grouping; -import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.GenericSearchBuilder; -import com.cloud.utils.db.JoinBuilder; -import com.cloud.utils.db.SearchBuilder; -import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.SearchCriteria.Func; -import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.TransactionLegacy; -import com.cloud.utils.exception.CloudRuntimeException; -import org.springframework.stereotype.Component; - -import javax.inject.Inject; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; @@ -46,6 +27,28 @@ import java.util.Map; import java.util.Set; import java.util.stream.Collectors; +import javax.inject.Inject; + +import org.springframework.stereotype.Component; + +import com.cloud.cpu.CPU; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.ClusterDetailsVO; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.HostPodVO; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.org.Grouping; +import com.cloud.org.Managed; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.JoinBuilder; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.SearchCriteria.Func; +import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.TransactionLegacy; +import com.cloud.utils.exception.CloudRuntimeException; + @Component public class ClusterDaoImpl extends GenericDaoBase implements ClusterDao { @@ -58,7 +61,6 @@ public class ClusterDaoImpl extends GenericDaoBase implements C protected final SearchBuilder ClusterSearch; protected final 
SearchBuilder ClusterDistinctArchSearch; protected final SearchBuilder ClusterArchSearch; - protected GenericSearchBuilder ClusterIdSearch; private static final String GET_POD_CLUSTER_MAP_PREFIX = "SELECT pod_id, id FROM cloud.cluster WHERE cluster.id IN( "; @@ -98,6 +100,8 @@ public class ClusterDaoImpl extends GenericDaoBase implements C ZoneClusterSearch = createSearchBuilder(); ZoneClusterSearch.and("dataCenterId", ZoneClusterSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + ZoneClusterSearch.and("allocationState", ZoneClusterSearch.entity().getAllocationState(), Op.EQ); + ZoneClusterSearch.and("managedState", ZoneClusterSearch.entity().getManagedState(), Op.EQ); ZoneClusterSearch.done(); ClusterIdSearch = createSearchBuilder(Long.class); @@ -167,23 +171,15 @@ public class ClusterDaoImpl extends GenericDaoBase implements C sc.setParameters("zoneId", zoneId); } List clusters = listBy(sc); - List hypers = new ArrayList(4); - for (ClusterVO cluster : clusters) { - hypers.add(cluster.getHypervisorType()); - } - - return hypers; + return clusters.stream() + .map(ClusterVO::getHypervisorType) + .distinct() + .collect(Collectors.toList()); } @Override - public Set getDistictAvailableHypervisorsAcrossClusters() { - SearchCriteria sc = ClusterSearch.create(); - List clusters = listBy(sc); - Set hypers = new HashSet<>(); - for (ClusterVO cluster : clusters) { - hypers.add(cluster.getHypervisorType()); - } - return hypers; + public Set getDistinctAvailableHypervisorsAcrossClusters() { + return new HashSet<>(getAvailableHypervisorInZone(null)); } @Override @@ -266,6 +262,23 @@ public class ClusterDaoImpl extends GenericDaoBase implements C return customSearch(sc, null); } + @Override + public Integer countAllByDcId(long zoneId) { + SearchCriteria sc = ZoneClusterSearch.create(); + sc.setParameters("dataCenterId", zoneId); + return getCount(sc); + } + + @Override + public Integer countAllManagedAndEnabledByDcId(long zoneId) { + SearchCriteria sc = 
ZoneClusterSearch.create(); + sc.setParameters("dataCenterId", zoneId); + sc.setParameters("allocationState", Grouping.AllocationState.Enabled); + sc.setParameters("managedState", Managed.ManagedState.Managed); + + return getCount(sc); + } + @Override public List listClustersByDcId(long zoneId) { SearchCriteria sc = ZoneClusterSearch.create(); @@ -289,7 +302,7 @@ public class ClusterDaoImpl extends GenericDaoBase implements C } @Override - public List listAllClusters(Long zoneId) { + public List listAllClusterIds(Long zoneId) { SearchCriteria sc = ClusterIdSearch.create(); if (zoneId != null) { sc.setParameters("dataCenterId", zoneId); diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java index 48b9c83c64c..ba01e31f80a 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java @@ -294,8 +294,7 @@ public class DataCenterIpAddressDaoImpl extends GenericDaoBase result = listBy(sc); - return result.size(); + return getCount(sc); } public DataCenterIpAddressDaoImpl() { diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterVnetDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterVnetDaoImpl.java index 1c29e6a944c..ff668249779 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterVnetDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterVnetDaoImpl.java @@ -81,7 +81,7 @@ public class DataCenterVnetDaoImpl extends GenericDaoBase sc = DcSearchAllocated.create(); sc.setParameters("physicalNetworkId", physicalNetworkId); - return listBy(sc).size(); + return getCount(sc); } @Override diff --git a/engine/schema/src/main/java/com/cloud/host/HostVO.java b/engine/schema/src/main/java/com/cloud/host/HostVO.java index a449eb450cf..bd6768fa0dd 100644 --- 
a/engine/schema/src/main/java/com/cloud/host/HostVO.java +++ b/engine/schema/src/main/java/com/cloud/host/HostVO.java @@ -404,6 +404,9 @@ public class HostVO implements Host { @Column(name = "mgmt_server_id") private Long managementServerId; + @Column(name = "last_mgmt_server_id") + private Long lastManagementServerId; + @Column(name = "dom0_memory") private long dom0MinMemory; @@ -570,6 +573,10 @@ public class HostVO implements Host { this.managementServerId = managementServerId; } + public void setLastManagementServerId(Long lastManagementServerId) { + this.lastManagementServerId = lastManagementServerId; + } + @Override public long getLastPinged() { return lastPinged; @@ -639,6 +646,11 @@ public class HostVO implements Host { return managementServerId; } + @Override + public Long getLastManagementServerId() { + return lastManagementServerId; + } + @Override public Date getDisconnectedOn() { return disconnectedOn; diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java index a2df6db44e5..cfd75b1a94b 100644 --- a/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java +++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java @@ -27,6 +27,7 @@ import com.cloud.hypervisor.Hypervisor; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.info.RunningHostCountInfo; import com.cloud.resource.ResourceState; +import com.cloud.utils.Pair; import com.cloud.utils.db.GenericDao; import com.cloud.utils.fsm.StateDao; @@ -39,8 +40,14 @@ public interface HostDao extends GenericDao, StateDao status); + Integer countAllByTypeInZone(long zoneId, final Host.Type type); + Integer countUpAndEnabledHostsInZone(long zoneId); + + Pair countAllHostsAndCPUSocketsByType(Type type); + /** * Mark all hosts associated with a certain management server * as disconnected. 
@@ -75,32 +82,41 @@ public interface HostDao extends GenericDao, StateDao findHypervisorHostInCluster(long clusterId); + HostVO findAnyStateHypervisorHostInCluster(long clusterId); + HostVO findOldestExistentHypervisorHostInCluster(long clusterId); List listAllUpAndEnabledNonHAHosts(Type type, Long clusterId, Long podId, long dcId, String haTag); List findByDataCenterId(Long zoneId); + List listIdsByDataCenterId(Long zoneId); + List findByPodId(Long podId); + List listIdsByPodId(Long podId); + List findByClusterId(Long clusterId); + List listIdsByClusterId(Long clusterId); + + List listIdsForUpRouting(Long zoneId, Long podId, Long clusterId); + + List listIdsByType(Type type); + + List listIdsForUpEnabledByZoneAndHypervisor(Long zoneId, HypervisorType hypervisorType); + List findByClusterIdAndEncryptionSupport(Long clusterId); /** - * Returns hosts that are 'Up' and 'Enabled' from the given Data Center/Zone + * Returns host Ids that are 'Up' and 'Enabled' from the given Data Center/Zone */ - List listByDataCenterId(long id); + List listEnabledIdsByDataCenterId(long id); /** - * Returns hosts that are from the given Data Center/Zone and at a given state (e.g. Creating, Enabled, Disabled, etc). 
+ * Returns host Ids that are 'Up' and 'Disabled' from the given Data Center/Zone */ - List listByDataCenterIdAndState(long id, ResourceState state); - - /** - * Returns hosts that are 'Up' and 'Disabled' from the given Data Center/Zone - */ - List listDisabledByDataCenterId(long id); + List listDisabledIdsByDataCenterId(long id); List listByDataCenterIdAndHypervisorType(long zoneId, Hypervisor.HypervisorType hypervisorType); @@ -110,8 +126,6 @@ public interface HostDao extends GenericDao, StateDao listAllHostsThatHaveNoRuleTag(Host.Type type, Long clusterId, Long podId, Long dcId); - List listAllHostsByType(Host.Type type); - HostVO findByPublicIp(String publicIp); List listClustersByHostTag(String hostTagOnOffering); @@ -151,12 +165,23 @@ public interface HostDao extends GenericDao, StateDao listHostsWithActiveVMs(long offeringId); + List listHostsByMsAndDc(long msId, long dcId); + + List listHostsByMs(long msId); + /** * Retrieves the number of hosts/agents this {@see ManagementServer} has responsibility over. - * @param msid the id of the {@see ManagementServer} + * @param msId the id of the {@see ManagementServer} * @return the number of hosts/agents this {@see ManagementServer} has responsibility over */ - int countByMs(long msid); + int countByMs(long msId); + + /** + * Retrieves the host ids/agents this {@see ManagementServer} has responsibility over. 
+ * @param msId the id of the {@see ManagementServer} + * @return the host ids/agents this {@see ManagementServer} has responsibility over + */ + List listByMs(long msId); /** * Retrieves the hypervisor versions of the hosts in the datacenter which are in Up state in ascending order @@ -171,4 +196,14 @@ public interface HostDao extends GenericDao, StateDao findClustersThatMatchHostTagRule(String computeOfferingTags); List listSsvmHostsWithPendingMigrateJobsOrderedByJobCount(); + + boolean isHostUp(long hostId); + + List findHostIdsByZoneClusterResourceStateTypeAndHypervisorType(final Long zoneId, final Long clusterId, + final List resourceStates, final List types, + final List hypervisorTypes); + + List listDistinctHypervisorTypes(final Long zoneId); + + List listByIds(final List ids); } diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java index 63950294654..54146e55049 100644 --- a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java @@ -20,6 +20,7 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Date; import java.util.HashMap; import java.util.HashSet; @@ -45,8 +46,8 @@ import com.cloud.dc.ClusterVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.gpu.dao.HostGpuGroupsDao; import com.cloud.gpu.dao.VGPUTypesDao; -import com.cloud.host.Host; import com.cloud.host.DetailVO; +import com.cloud.host.Host; import com.cloud.host.Host.Type; import com.cloud.host.HostTagVO; import com.cloud.host.HostVO; @@ -59,6 +60,8 @@ import com.cloud.org.Grouping; import com.cloud.org.Managed; import com.cloud.resource.ResourceState; import com.cloud.utils.DateUtil; +import com.cloud.utils.Pair; +import com.cloud.utils.StringUtils; import com.cloud.utils.db.Attribute; import com.cloud.utils.db.DB; 
import com.cloud.utils.db.Filter; @@ -74,19 +77,17 @@ import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.UpdateBuilder; import com.cloud.utils.exception.CloudRuntimeException; -import java.util.Arrays; - @DB @TableGenerator(name = "host_req_sq", table = "op_host", pkColumnName = "id", valueColumnName = "sequence", allocationSize = 1) public class HostDaoImpl extends GenericDaoBase implements HostDao { //FIXME: , ExternalIdDao { - private static final String LIST_HOST_IDS_BY_COMPUTETAGS = "SELECT filtered.host_id, COUNT(filtered.tag) AS tag_count " - + "FROM (SELECT host_id, tag, is_tag_a_rule FROM host_tags GROUP BY host_id,tag) AS filtered " - + "WHERE tag IN(%s) AND is_tag_a_rule = 0 " + private static final String LIST_HOST_IDS_BY_HOST_TAGS = "SELECT filtered.host_id, COUNT(filtered.tag) AS tag_count " + + "FROM (SELECT host_id, tag, is_tag_a_rule FROM host_tags GROUP BY host_id,tag,is_tag_a_rule) AS filtered " + + "WHERE tag IN (%s) AND (is_tag_a_rule = 0 OR is_tag_a_rule IS NULL) " + "GROUP BY host_id " + "HAVING tag_count = %s "; private static final String SEPARATOR = ","; - private static final String LIST_CLUSTERID_FOR_HOST_TAG = "select distinct cluster_id from host join ( %s ) AS selected_hosts ON host.id = selected_hosts.host_id"; + private static final String LIST_CLUSTER_IDS_FOR_HOST_TAGS = "select distinct cluster_id from host join ( %s ) AS selected_hosts ON host.id = selected_hosts.host_id"; private static final String GET_HOSTS_OF_ACTIVE_VMS = "select h.id " + "from vm_instance vm " + "join host h on (vm.host_id=h.id) " + @@ -98,6 +99,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao protected SearchBuilder TypePodDcStatusSearch; + protected SearchBuilder IdsSearch; protected SearchBuilder IdStatusSearch; protected SearchBuilder TypeDcSearch; protected SearchBuilder TypeDcStatusSearch; @@ -124,7 +126,10 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao protected SearchBuilder 
UnmanagedApplianceSearch; protected SearchBuilder MaintenanceCountSearch; protected SearchBuilder HostTypeCountSearch; - protected SearchBuilder ResponsibleMsCountSearch; + protected SearchBuilder ResponsibleMsSearch; + protected SearchBuilder ResponsibleMsDcSearch; + protected GenericSearchBuilder ResponsibleMsIdSearch; + protected SearchBuilder HostTypeClusterCountSearch; protected SearchBuilder HostTypeZoneCountSearch; protected SearchBuilder ClusterStatusSearch; protected SearchBuilder TypeNameZoneSearch; @@ -136,8 +141,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao protected SearchBuilder ManagedRoutingServersSearch; protected SearchBuilder SecondaryStorageVMSearch; - protected GenericSearchBuilder HostIdSearch; - protected GenericSearchBuilder HostsInStatusSearch; + protected GenericSearchBuilder HostsInStatusesSearch; protected GenericSearchBuilder CountRoutingByDc; protected SearchBuilder HostTransferSearch; protected SearchBuilder ClusterManagedSearch; @@ -187,11 +191,30 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao HostTypeCountSearch = createSearchBuilder(); HostTypeCountSearch.and("type", HostTypeCountSearch.entity().getType(), SearchCriteria.Op.EQ); + HostTypeCountSearch.and("zoneId", HostTypeCountSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + HostTypeCountSearch.and("resourceState", HostTypeCountSearch.entity().getResourceState(), SearchCriteria.Op.EQ); HostTypeCountSearch.done(); - ResponsibleMsCountSearch = createSearchBuilder(); - ResponsibleMsCountSearch.and("managementServerId", ResponsibleMsCountSearch.entity().getManagementServerId(), SearchCriteria.Op.EQ); - ResponsibleMsCountSearch.done(); + ResponsibleMsSearch = createSearchBuilder(); + ResponsibleMsSearch.and("managementServerId", ResponsibleMsSearch.entity().getManagementServerId(), SearchCriteria.Op.EQ); + ResponsibleMsSearch.done(); + + ResponsibleMsDcSearch = createSearchBuilder(); + 
ResponsibleMsDcSearch.and("managementServerId", ResponsibleMsDcSearch.entity().getManagementServerId(), SearchCriteria.Op.EQ); + ResponsibleMsDcSearch.and("dcId", ResponsibleMsDcSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + ResponsibleMsDcSearch.done(); + + ResponsibleMsIdSearch = createSearchBuilder(String.class); + ResponsibleMsIdSearch.selectFields(ResponsibleMsIdSearch.entity().getUuid()); + ResponsibleMsIdSearch.and("managementServerId", ResponsibleMsIdSearch.entity().getManagementServerId(), SearchCriteria.Op.EQ); + ResponsibleMsIdSearch.done(); + + HostTypeClusterCountSearch = createSearchBuilder(); + HostTypeClusterCountSearch.and("cluster", HostTypeClusterCountSearch.entity().getClusterId(), SearchCriteria.Op.EQ); + HostTypeClusterCountSearch.and("type", HostTypeClusterCountSearch.entity().getType(), SearchCriteria.Op.EQ); + HostTypeClusterCountSearch.and("status", HostTypeClusterCountSearch.entity().getStatus(), SearchCriteria.Op.IN); + HostTypeClusterCountSearch.and("removed", HostTypeClusterCountSearch.entity().getRemoved(), SearchCriteria.Op.NULL); + HostTypeClusterCountSearch.done(); HostTypeZoneCountSearch = createSearchBuilder(); HostTypeZoneCountSearch.and("type", HostTypeZoneCountSearch.entity().getType(), SearchCriteria.Op.EQ); @@ -240,6 +263,10 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao TypeClusterStatusSearch.and("resourceState", TypeClusterStatusSearch.entity().getResourceState(), SearchCriteria.Op.EQ); TypeClusterStatusSearch.done(); + IdsSearch = createSearchBuilder(); + IdsSearch.and("id", IdsSearch.entity().getId(), SearchCriteria.Op.IN); + IdsSearch.done(); + IdStatusSearch = createSearchBuilder(); IdStatusSearch.and("id", IdStatusSearch.entity().getId(), SearchCriteria.Op.EQ); IdStatusSearch.and("states", IdStatusSearch.entity().getStatus(), SearchCriteria.Op.IN); @@ -386,14 +413,14 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao 
AvailHypevisorInZone.groupBy(AvailHypevisorInZone.entity().getHypervisorType()); AvailHypevisorInZone.done(); - HostsInStatusSearch = createSearchBuilder(Long.class); - HostsInStatusSearch.selectFields(HostsInStatusSearch.entity().getId()); - HostsInStatusSearch.and("dc", HostsInStatusSearch.entity().getDataCenterId(), Op.EQ); - HostsInStatusSearch.and("pod", HostsInStatusSearch.entity().getPodId(), Op.EQ); - HostsInStatusSearch.and("cluster", HostsInStatusSearch.entity().getClusterId(), Op.EQ); - HostsInStatusSearch.and("type", HostsInStatusSearch.entity().getType(), Op.EQ); - HostsInStatusSearch.and("statuses", HostsInStatusSearch.entity().getStatus(), Op.IN); - HostsInStatusSearch.done(); + HostsInStatusesSearch = createSearchBuilder(Long.class); + HostsInStatusesSearch.selectFields(HostsInStatusesSearch.entity().getId()); + HostsInStatusesSearch.and("dc", HostsInStatusesSearch.entity().getDataCenterId(), Op.EQ); + HostsInStatusesSearch.and("pod", HostsInStatusesSearch.entity().getPodId(), Op.EQ); + HostsInStatusesSearch.and("cluster", HostsInStatusesSearch.entity().getClusterId(), Op.EQ); + HostsInStatusesSearch.and("type", HostsInStatusesSearch.entity().getType(), Op.EQ); + HostsInStatusesSearch.and("statuses", HostsInStatusesSearch.entity().getStatus(), Op.IN); + HostsInStatusesSearch.done(); CountRoutingByDc = createSearchBuilder(Long.class); CountRoutingByDc.select(null, Func.COUNT, null); @@ -456,11 +483,6 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao HostsInClusterSearch.and("server", HostsInClusterSearch.entity().getManagementServerId(), SearchCriteria.Op.NNULL); HostsInClusterSearch.done(); - HostIdSearch = createSearchBuilder(Long.class); - HostIdSearch.selectFields(HostIdSearch.entity().getId()); - HostIdSearch.and("dataCenterId", HostIdSearch.entity().getDataCenterId(), Op.EQ); - HostIdSearch.done(); - searchBuilderFindByRuleTag = _hostTagsDao.createSearchBuilder(); searchBuilderFindByRuleTag.and("is_tag_a_rule", 
searchBuilderFindByRuleTag.entity().getIsTagARule(), Op.EQ); searchBuilderFindByRuleTag.or("tagDoesNotExist", searchBuilderFindByRuleTag.entity().getIsTagARule(), Op.NULL); @@ -492,8 +514,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao sc.setParameters("resourceState", (Object[])states); sc.setParameters("cluster", clusterId); - List hosts = listBy(sc); - return hosts.size(); + return getCount(sc); } @Override @@ -504,36 +525,62 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao } @Override - public Integer countAllByTypeInZone(long zoneId, Type type) { - SearchCriteria sc = HostTypeCountSearch.create(); - sc.setParameters("type", type); - sc.setParameters("dc", zoneId); + public Integer countAllInClusterByTypeAndStates(Long clusterId, final Host.Type type, List status) { + SearchCriteria sc = HostTypeClusterCountSearch.create(); + if (clusterId != null) { + sc.setParameters("cluster", clusterId); + } + if (type != null) { + sc.setParameters("type", type); + } + if (status != null) { + sc.setParameters("status", status.toArray()); + } return getCount(sc); } @Override - public List listByDataCenterId(long id) { - return listByDataCenterIdAndState(id, ResourceState.Enabled); + public Integer countAllByTypeInZone(long zoneId, Type type) { + SearchCriteria sc = HostTypeCountSearch.create(); + sc.setParameters("type", type); + sc.setParameters("zoneId", zoneId); + return getCount(sc); } @Override - public List listByDataCenterIdAndState(long id, ResourceState state) { - SearchCriteria sc = scHostsFromZoneUpRouting(id); - sc.setParameters("resourceState", state); - return listBy(sc); + public Integer countUpAndEnabledHostsInZone(long zoneId) { + SearchCriteria sc = HostTypeCountSearch.create(); + sc.setParameters("type", Type.Routing); + sc.setParameters("resourceState", ResourceState.Enabled); + sc.setParameters("zoneId", zoneId); + return getCount(sc); } @Override - public List listDisabledByDataCenterId(long id) { - return 
listByDataCenterIdAndState(id, ResourceState.Disabled); + public Pair countAllHostsAndCPUSocketsByType(Type type) { + GenericSearchBuilder sb = createSearchBuilder(SumCount.class); + sb.select("sum", Func.SUM, sb.entity().getCpuSockets()); + sb.select("count", Func.COUNT, null); + sb.and("type", sb.entity().getType(), SearchCriteria.Op.EQ); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("type", type); + SumCount result = customSearch(sc, null).get(0); + return new Pair<>((int)result.count, (int)result.sum); } - private SearchCriteria scHostsFromZoneUpRouting(long id) { - SearchCriteria sc = DcSearch.create(); - sc.setParameters("dc", id); - sc.setParameters("status", Status.Up); - sc.setParameters("type", Host.Type.Routing); - return sc; + private List listIdsForRoutingByZoneIdAndResourceState(long zoneId, ResourceState state) { + return listIdsBy(Type.Routing, Status.Up, state, null, zoneId, null, null); + } + + @Override + public List listEnabledIdsByDataCenterId(long id) { + return listIdsForRoutingByZoneIdAndResourceState(id, ResourceState.Enabled); + } + + @Override + public List listDisabledIdsByDataCenterId(long id) { + return listIdsForRoutingByZoneIdAndResourceState(id, ResourceState.Disabled); } @Override @@ -591,9 +638,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao sb.append(" "); } - if (logger.isTraceEnabled()) { - logger.trace("Following hosts got reset: " + sb.toString()); - } + logger.trace("Following hosts got reset: {}", sb); } /* @@ -603,8 +648,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao SearchCriteria sc = ClustersOwnedByMSSearch.create(); sc.setParameters("server", managementServerId); - List clusters = customSearch(sc, null); - return clusters; + return customSearch(sc, null); } /* @@ -614,13 +658,11 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao SearchCriteria sc = ClustersForHostsNotOwnedByAnyMSSearch.create(); 
sc.setJoinParameters("ClusterManagedSearch", "managed", Managed.ManagedState.Managed); - List clusters = customSearch(sc, null); - return clusters; + return customSearch(sc, null); } /** * This determines if hosts belonging to cluster(@clusterId) are up for grabs - * * This is used for handling following cases: * 1. First host added in cluster * 2. During MS restart all hosts in a cluster are without any MS @@ -630,9 +672,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao sc.setParameters("cluster", clusterId); List hosts = search(sc, null); - boolean ownCluster = (hosts == null || hosts.size() == 0); - - return ownCluster; + return (hosts == null || hosts.isEmpty()); } @Override @@ -649,14 +689,14 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao logger.debug("Completed resetting hosts suitable for reconnect"); } - List assignedHosts = new ArrayList(); + List assignedHosts = new ArrayList<>(); if (logger.isDebugEnabled()) { logger.debug("Acquiring hosts for clusters already owned by this management server"); } List clusters = findClustersOwnedByManagementServer(managementServerId); txn.start(); - if (clusters.size() > 0) { + if (!clusters.isEmpty()) { // handle clusters already owned by @managementServerId SearchCriteria sc = UnmanagedDirectConnectSearch.create(); sc.setParameters("lastPinged", lastPingSecondsAfter); @@ -671,13 +711,9 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao sb.append(host.getId()); sb.append(" "); } - if (logger.isTraceEnabled()) { - logger.trace("Following hosts got acquired for clusters already owned: " + sb.toString()); - } - } - if (logger.isDebugEnabled()) { - logger.debug("Completed acquiring hosts for clusters already owned by this management server"); + logger.trace("Following hosts got acquired for clusters already owned: {}", sb); } + logger.debug("Completed acquiring hosts for clusters already owned by this management server"); if (assignedHosts.size() < limit) { if 
(logger.isDebugEnabled()) { @@ -689,7 +725,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao if (clusters.size() > limit) { updatedClusters = clusters.subList(0, limit.intValue()); } - if (updatedClusters.size() > 0) { + if (!updatedClusters.isEmpty()) { SearchCriteria sc = UnmanagedDirectConnectSearch.create(); sc.setParameters("lastPinged", lastPingSecondsAfter); sc.setJoinParameters("ClusterManagedSearch", "managed", Managed.ManagedState.Managed); @@ -697,10 +733,10 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao List unmanagedHosts = lockRows(sc, null, true); // group hosts based on cluster - Map> hostMap = new HashMap>(); + Map> hostMap = new HashMap<>(); for (HostVO host : unmanagedHosts) { if (hostMap.get(host.getClusterId()) == null) { - hostMap.put(host.getClusterId(), new ArrayList()); + hostMap.put(host.getClusterId(), new ArrayList<>()); } hostMap.get(host.getClusterId()).add(host); } @@ -721,13 +757,9 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao break; } } - if (logger.isTraceEnabled()) { - logger.trace("Following hosts got acquired from newly owned clusters: " + sb.toString()); - } - } - if (logger.isDebugEnabled()) { - logger.debug("Completed acquiring hosts for clusters not owned by any management server"); + logger.trace("Following hosts got acquired from newly owned clusters: {}", sb); } + logger.debug("Completed acquiring hosts for clusters not owned by any management server"); } txn.commit(); @@ -782,6 +814,15 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao @Override public List listByHostTag(Host.Type type, Long clusterId, Long podId, Long dcId, String hostTag) { + return listHostsWithOrWithoutHostTags(type, clusterId, podId, dcId, hostTag, true); + } + + private List listHostsWithOrWithoutHostTags(Host.Type type, Long clusterId, Long podId, Long dcId, String hostTags, boolean withHostTags) { + if (StringUtils.isEmpty(hostTags)) { + logger.debug("Host 
tags not specified, to list hosts"); + return new ArrayList<>(); + } + SearchBuilder hostSearch = createSearchBuilder(); HostVO entity = hostSearch.entity(); hostSearch.and("type", entity.getType(), SearchCriteria.Op.EQ); @@ -792,7 +833,9 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao hostSearch.and("resourceState", entity.getResourceState(), SearchCriteria.Op.EQ); SearchCriteria sc = hostSearch.create(); - sc.setParameters("type", type.toString()); + if (type != null) { + sc.setParameters("type", type.toString()); + } if (podId != null) { sc.setParameters("pod", podId); } @@ -805,27 +848,38 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao sc.setParameters("status", Status.Up.toString()); sc.setParameters("resourceState", ResourceState.Enabled.toString()); - List tmpHosts = listBy(sc); - List correctHostsByHostTags = new ArrayList(); - List hostIdsByComputeOffTags = findHostByComputeOfferings(hostTag); + List upAndEnabledHosts = listBy(sc); + if (CollectionUtils.isEmpty(upAndEnabledHosts)) { + return new ArrayList<>(); + } - tmpHosts.forEach((host) -> { if(hostIdsByComputeOffTags.contains(host.getId())) correctHostsByHostTags.add(host);}); + List hostIdsByHostTags = findHostIdsByHostTags(hostTags); + if (CollectionUtils.isEmpty(hostIdsByHostTags)) { + return withHostTags ? 
new ArrayList<>() : upAndEnabledHosts; + } - return correctHostsByHostTags; + if (withHostTags) { + List upAndEnabledHostsWithHostTags = new ArrayList<>(); + upAndEnabledHosts.forEach((host) -> { if (hostIdsByHostTags.contains(host.getId())) upAndEnabledHostsWithHostTags.add(host);}); + return upAndEnabledHostsWithHostTags; + } else { + List upAndEnabledHostsWithoutHostTags = new ArrayList<>(); + upAndEnabledHosts.forEach((host) -> { if (!hostIdsByHostTags.contains(host.getId())) upAndEnabledHostsWithoutHostTags.add(host);}); + return upAndEnabledHostsWithoutHostTags; + } } @Override public List listAllUpAndEnabledNonHAHosts(Type type, Long clusterId, Long podId, long dcId, String haTag) { + if (StringUtils.isNotEmpty(haTag)) { + return listHostsWithOrWithoutHostTags(type, clusterId, podId, dcId, haTag, false); + } + SearchBuilder hostTagSearch = _hostTagsDao.createSearchBuilder(); hostTagSearch.and(); hostTagSearch.op("isTagARule", hostTagSearch.entity().getIsTagARule(), Op.EQ); hostTagSearch.or("tagDoesNotExist", hostTagSearch.entity().getIsTagARule(), Op.NULL); hostTagSearch.cp(); - if (haTag != null && !haTag.isEmpty()) { - hostTagSearch.and().op("tag", hostTagSearch.entity().getTag(), SearchCriteria.Op.NEQ); - hostTagSearch.or("tagNull", hostTagSearch.entity().getTag(), SearchCriteria.Op.NULL); - hostTagSearch.cp(); - } SearchBuilder hostSearch = createSearchBuilder(); @@ -836,18 +890,12 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao hostSearch.and("status", hostSearch.entity().getStatus(), SearchCriteria.Op.EQ); hostSearch.and("resourceState", hostSearch.entity().getResourceState(), SearchCriteria.Op.EQ); - hostSearch.join("hostTagSearch", hostTagSearch, hostSearch.entity().getId(), hostTagSearch.entity().getHostId(), JoinBuilder.JoinType.LEFTOUTER); - SearchCriteria sc = hostSearch.create(); sc.setJoinParameters("hostTagSearch", "isTagARule", false); - if (haTag != null && !haTag.isEmpty()) { - sc.setJoinParameters("hostTagSearch", 
"tag", haTag); - } - if (type != null) { sc.setParameters("type", type); } @@ -887,12 +935,12 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao @DB @Override public List findLostHosts(long timeout) { - List result = new ArrayList(); + List result = new ArrayList<>(); String sql = "select h.id from host h left join cluster c on h.cluster_id=c.id where h.mgmt_server_id is not null and h.last_ping < ? and h.status in ('Up', 'Updating', 'Disconnected', 'Connecting') and h.type not in ('ExternalFirewall', 'ExternalLoadBalancer', 'TrafficMonitor', 'SecondaryStorage', 'LocalSecondaryStorage', 'L2Networking') and (h.cluster_id is null or c.managed_state = 'Managed') ;"; try (TransactionLegacy txn = TransactionLegacy.currentTxn(); - PreparedStatement pstmt = txn.prepareStatement(sql);) { + PreparedStatement pstmt = txn.prepareStatement(sql)) { pstmt.setLong(1, timeout); - try (ResultSet rs = pstmt.executeQuery();) { + try (ResultSet rs = pstmt.executeQuery()) { while (rs.next()) { long id = rs.getLong(1); //ID column result.add(findById(id)); @@ -925,7 +973,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao HashMap> groupDetails = host.getGpuGroupDetails(); if (groupDetails != null) { // Create/Update GPU group entries - _hostGpuGroupsDao.persist(host.getId(), new ArrayList(groupDetails.keySet())); + _hostGpuGroupsDao.persist(host.getId(), new ArrayList<>(groupDetails.keySet())); // Create/Update VGPU types entries _vgpuTypesDao.persist(host.getId(), groupDetails); } @@ -968,7 +1016,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao boolean persisted = super.update(hostId, host); if (!persisted) { - return persisted; + return false; } saveDetails(host); @@ -977,7 +1025,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao txn.commit(); - return persisted; + return true; } @Override @@ -988,11 +1036,10 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao + "select 
h.data_center_id, h.type, count(*) as count from host as h INNER JOIN mshost as m ON h.mgmt_server_id=m.msid " + "where h.status='Up' and h.type='Routing' and m.last_update > ? " + "group by h.data_center_id, h.type) as t " + "ORDER by t.data_center_id, t.type"; - ArrayList l = new ArrayList(); + ArrayList l = new ArrayList<>(); TransactionLegacy txn = TransactionLegacy.currentTxn(); - ; - PreparedStatement pstmt = null; + PreparedStatement pstmt; try { pstmt = txn.prepareAutoCloseStatement(sql); String gmtCutTime = DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime); @@ -1016,9 +1063,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao @Override public long getNextSequence(long hostId) { - if (logger.isTraceEnabled()) { - logger.trace("getNextSequence(), hostId: " + hostId); - } + logger.trace("getNextSequence(), hostId: {}", hostId); TableGenerator tg = _tgs.get("host_req_sq"); assert tg != null : "how can this be wrong!"; @@ -1087,31 +1132,30 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao HostVO ho = findById(host.getId()); assert ho != null : "How how how? : " + host.getId(); + // TODO handle this if(debug){}else{log.debug} it makes no sense if (logger.isDebugEnabled()) { - - StringBuilder str = new StringBuilder("Unable to update host for event:").append(event.toString()); - str.append(". Name=").append(host.getName()); - str.append("; New=[status=").append(newStatus.toString()).append(":msid=").append(newStatus.lostConnection() ? 
"null" : host.getManagementServerId()) - .append(":lastpinged=").append(host.getLastPinged()).append("]"); - str.append("; Old=[status=").append(oldStatus.toString()).append(":msid=").append(host.getManagementServerId()).append(":lastpinged=").append(oldPingTime) - .append("]"); - str.append("; DB=[status=").append(vo.getStatus().toString()).append(":msid=").append(vo.getManagementServerId()).append(":lastpinged=").append(vo.getLastPinged()) - .append(":old update count=").append(oldUpdateCount).append("]"); - logger.debug(str.toString()); + String str = "Unable to update host for event:" + event + + ". Name=" + host.getName() + + "; New=[status=" + newStatus + ":msid=" + (newStatus.lostConnection() ? "null" : host.getManagementServerId()) + + ":lastpinged=" + host.getLastPinged() + "]" + + "; Old=[status=" + oldStatus.toString() + ":msid=" + host.getManagementServerId() + ":lastpinged=" + oldPingTime + + "]" + + "; DB=[status=" + vo.getStatus().toString() + ":msid=" + vo.getManagementServerId() + ":lastpinged=" + vo.getLastPinged() + + ":old update count=" + oldUpdateCount + "]"; + logger.debug(str); } else { - StringBuilder msg = new StringBuilder("Agent status update: ["); - msg.append("id = " + host.getId()); - msg.append("; name = " + host.getName()); - msg.append("; old status = " + oldStatus); - msg.append("; event = " + event); - msg.append("; new status = " + newStatus); - msg.append("; old update count = " + oldUpdateCount); - msg.append("; new update count = " + newUpdateCount + "]"); - logger.debug(msg.toString()); + String msg = "Agent status update: [" + "id = " + host.getId() + + "; name = " + host.getName() + + "; old status = " + oldStatus + + "; event = " + event + + "; new status = " + newStatus + + "; old update count = " + oldUpdateCount + + "; new update count = " + newUpdateCount + "]"; + logger.debug(msg); } if (ho.getState() == newStatus) { - logger.debug("Host " + ho.getName() + " state has already been updated to " + newStatus); + 
logger.debug("Host {} state has already been updated to {}", ho.getName(), newStatus); return true; } } @@ -1137,25 +1181,24 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao int result = update(ub, sc, null); assert result <= 1 : "How can this update " + result + " rows? "; + // TODO handle this if(debug){}else{log.debug} it makes no sense if (logger.isDebugEnabled() && result == 0) { HostVO ho = findById(host.getId()); assert ho != null : "How how how? : " + host.getId(); - StringBuilder str = new StringBuilder("Unable to update resource state: ["); - str.append("m = " + host.getId()); - str.append("; name = " + host.getName()); - str.append("; old state = " + oldState); - str.append("; event = " + event); - str.append("; new state = " + newState + "]"); - logger.debug(str.toString()); + String str = "Unable to update resource state: [" + "m = " + host.getId() + + "; name = " + host.getName() + + "; old state = " + oldState + + "; event = " + event + + "; new state = " + newState + "]"; + logger.debug(str); } else { - StringBuilder msg = new StringBuilder("Resource state update: ["); - msg.append("id = " + host.getId()); - msg.append("; name = " + host.getName()); - msg.append("; old state = " + oldState); - msg.append("; event = " + event); - msg.append("; new state = " + newState + "]"); - logger.debug(msg.toString()); + String msg = "Resource state update: [" + "id = " + host.getId() + + "; name = " + host.getName() + + "; old state = " + oldState + + "; event = " + event + + "; new state = " + newState + "]"; + logger.debug(msg); } return result > 0; @@ -1178,6 +1221,11 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return listBy(sc); } + @Override + public List listIdsByDataCenterId(Long zoneId) { + return listIdsBy(Type.Routing, null, null, null, zoneId, null, null); + } + @Override public List findByPodId(Long podId) { SearchCriteria sc = PodSearch.create(); @@ -1185,6 +1233,11 @@ public class HostDaoImpl extends 
GenericDaoBase implements HostDao return listBy(sc); } + @Override + public List listIdsByPodId(Long podId) { + return listIdsBy(null, null, null, null, null, podId, null); + } + @Override public List findByClusterId(Long clusterId) { SearchCriteria sc = ClusterSearch.create(); @@ -1192,6 +1245,63 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return listBy(sc); } + protected List listIdsBy(Host.Type type, Status status, ResourceState resourceState, + HypervisorType hypervisorType, Long zoneId, Long podId, Long clusterId) { + GenericSearchBuilder sb = createSearchBuilder(Long.class); + sb.selectFields(sb.entity().getId()); + sb.and("type", sb.entity().getType(), SearchCriteria.Op.EQ); + sb.and("status", sb.entity().getStatus(), SearchCriteria.Op.EQ); + sb.and("resourceState", sb.entity().getResourceState(), SearchCriteria.Op.EQ); + sb.and("hypervisorType", sb.entity().getHypervisorType(), SearchCriteria.Op.EQ); + sb.and("zoneId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); + sb.and("podId", sb.entity().getPodId(), SearchCriteria.Op.EQ); + sb.and("clusterId", sb.entity().getClusterId(), SearchCriteria.Op.EQ); + sb.done(); + SearchCriteria sc = sb.create(); + if (type != null) { + sc.setParameters("type", type); + } + if (status != null) { + sc.setParameters("status", status); + } + if (resourceState != null) { + sc.setParameters("resourceState", resourceState); + } + if (hypervisorType != null) { + sc.setParameters("hypervisorType", hypervisorType); + } + if (zoneId != null) { + sc.setParameters("zoneId", zoneId); + } + if (podId != null) { + sc.setParameters("podId", podId); + } + if (clusterId != null) { + sc.setParameters("clusterId", clusterId); + } + return customSearch(sc, null); + } + + @Override + public List listIdsByClusterId(Long clusterId) { + return listIdsBy(null, null, null, null, null, null, clusterId); + } + + @Override + public List listIdsForUpRouting(Long zoneId, Long podId, Long clusterId) { + return 
listIdsBy(Type.Routing, Status.Up, null, null, zoneId, podId, clusterId); + } + + @Override + public List listIdsByType(Type type) { + return listIdsBy(type, null, null, null, null, null, null); + } + + @Override + public List listIdsForUpEnabledByZoneAndHypervisor(Long zoneId, HypervisorType hypervisorType) { + return listIdsBy(null, Status.Up, ResourceState.Enabled, hypervisorType, zoneId, null, null); + } + @Override public List findByClusterIdAndEncryptionSupport(Long clusterId) { SearchBuilder hostCapabilitySearch = _detailsDao.createSearchBuilder(); @@ -1244,6 +1354,15 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return listBy(sc); } + @Override + public HostVO findAnyStateHypervisorHostInCluster(long clusterId) { + SearchCriteria sc = TypeClusterStatusSearch.create(); + sc.setParameters("type", Host.Type.Routing); + sc.setParameters("cluster", clusterId); + List list = listBy(sc, new Filter(1)); + return list.isEmpty() ? null : list.get(0); + } + @Override public HostVO findOldestExistentHypervisorHostInCluster(long clusterId) { SearchCriteria sc = TypeClusterStatusSearch.create(); @@ -1254,7 +1373,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao Filter orderByFilter = new Filter(HostVO.class, "created", true, null, null); List hosts = search(sc, orderByFilter, null, false); - if (hosts != null && hosts.size() > 0) { + if (hosts != null && !hosts.isEmpty()) { return hosts.get(0); } @@ -1263,9 +1382,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao @Override public List listAllHosts(long zoneId) { - SearchCriteria sc = HostIdSearch.create(); - sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, zoneId); - return customSearch(sc, null); + return listIdsBy(null, null, null, null, zoneId, null, null); } @Override @@ -1299,19 +1416,19 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao } @Override - public List listClustersByHostTag(String computeOfferingTags) { + public List 
listClustersByHostTag(String hostTags) { TransactionLegacy txn = TransactionLegacy.currentTxn(); - String sql = this.LIST_CLUSTERID_FOR_HOST_TAG; - PreparedStatement pstmt = null; - List result = new ArrayList(); - List tags = Arrays.asList(computeOfferingTags.split(this.SEPARATOR)); - String subselect = getHostIdsByComputeTags(tags); - sql = String.format(sql, subselect); + String selectStmtToListClusterIdsByHostTags = LIST_CLUSTER_IDS_FOR_HOST_TAGS; + PreparedStatement pstmt; + List result = new ArrayList<>(); + List tags = Arrays.asList(hostTags.split(SEPARATOR)); + String selectStmtToListHostIdsByHostTags = getSelectStmtToListHostIdsByHostTags(tags); + selectStmtToListClusterIdsByHostTags = String.format(selectStmtToListClusterIdsByHostTags, selectStmtToListHostIdsByHostTags); try { - pstmt = txn.prepareStatement(sql); + pstmt = txn.prepareStatement(selectStmtToListClusterIdsByHostTags); - for(int i = 0; i < tags.size(); i++){ + for (int i = 0; i < tags.size(); i++){ pstmt.setString(i+1, tags.get(i)); } @@ -1322,20 +1439,20 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao pstmt.close(); return result; } catch (SQLException e) { - throw new CloudRuntimeException("DB Exception on: " + sql, e); + throw new CloudRuntimeException("DB Exception on: " + selectStmtToListClusterIdsByHostTags, e); } } - private List findHostByComputeOfferings(String computeOfferingTags){ + private List findHostIdsByHostTags(String hostTags){ TransactionLegacy txn = TransactionLegacy.currentTxn(); - PreparedStatement pstmt = null; - List result = new ArrayList(); - List tags = Arrays.asList(computeOfferingTags.split(this.SEPARATOR)); - String select = getHostIdsByComputeTags(tags); + PreparedStatement pstmt; + List result = new ArrayList<>(); + List tags = Arrays.asList(hostTags.split(SEPARATOR)); + String selectStmtToListHostIdsByHostTags = getSelectStmtToListHostIdsByHostTags(tags); try { - pstmt = txn.prepareStatement(select); + pstmt = 
txn.prepareStatement(selectStmtToListHostIdsByHostTags); - for(int i = 0; i < tags.size(); i++){ + for (int i = 0; i < tags.size(); i++){ pstmt.setString(i+1, tags.get(i)); } @@ -1346,7 +1463,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao pstmt.close(); return result; } catch (SQLException e) { - throw new CloudRuntimeException("DB Exception on: " + select, e); + throw new CloudRuntimeException("DB Exception on: " + selectStmtToListHostIdsByHostTags, e); } } @@ -1396,16 +1513,16 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return result; } - private String getHostIdsByComputeTags(List offeringTags){ - List questionMarks = new ArrayList(); - offeringTags.forEach((tag) -> { questionMarks.add("?"); }); - return String.format(this.LIST_HOST_IDS_BY_COMPUTETAGS, String.join(",", questionMarks),questionMarks.size()); + private String getSelectStmtToListHostIdsByHostTags(List hostTags){ + List questionMarks = new ArrayList<>(); + hostTags.forEach((tag) -> questionMarks.add("?")); + return String.format(LIST_HOST_IDS_BY_HOST_TAGS, String.join(SEPARATOR, questionMarks), questionMarks.size()); } @Override public List listHostsWithActiveVMs(long offeringId) { TransactionLegacy txn = TransactionLegacy.currentTxn(); - PreparedStatement pstmt = null; + PreparedStatement pstmt; List result = new ArrayList<>(); StringBuilder sql = new StringBuilder(GET_HOSTS_OF_ACTIVE_VMS); try { @@ -1424,15 +1541,37 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao } @Override - public int countByMs(long msid) { - SearchCriteria sc = ResponsibleMsCountSearch.create(); - sc.setParameters("managementServerId", msid); + public List listHostsByMsAndDc(long msId, long dcId) { + SearchCriteria sc = ResponsibleMsDcSearch.create(); + sc.setParameters("managementServerId", msId); + sc.setParameters("dcId", dcId); + return listBy(sc); + } + + @Override + public List listHostsByMs(long msId) { + SearchCriteria sc = 
ResponsibleMsSearch.create(); + sc.setParameters("managementServerId", msId); + return listBy(sc); + } + + @Override + public int countByMs(long msId) { + SearchCriteria sc = ResponsibleMsSearch.create(); + sc.setParameters("managementServerId", msId); return getCount(sc); } + @Override + public List listByMs(long msId) { + SearchCriteria sc = ResponsibleMsIdSearch.create(); + sc.addAnd("managementServerId", SearchCriteria.Op.EQ, msId); + return customSearch(sc, null); + } + @Override public List listOrderedHostsHypervisorVersionsInDatacenter(long datacenterId, HypervisorType hypervisorType) { - PreparedStatement pstmt = null; + PreparedStatement pstmt; List result = new ArrayList<>(); try { TransactionLegacy txn = TransactionLegacy.currentTxn(); @@ -1449,15 +1588,6 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return result; } - @Override - public List listAllHostsByType(Host.Type type) { - SearchCriteria sc = TypeSearch.create(); - sc.setParameters("type", type); - sc.setParameters("resourceState", ResourceState.Enabled); - - return listBy(sc); - } - @Override public List listByType(Host.Type type) { SearchCriteria sc = TypeSearch.create(); @@ -1602,4 +1732,71 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao } return String.format(sqlFindHostInZoneToExecuteCommand, hostResourceStatus); } + + @Override + public boolean isHostUp(long hostId) { + GenericSearchBuilder sb = createSearchBuilder(Status.class); + sb.and("id", sb.entity().getId(), Op.EQ); + sb.selectFields(sb.entity().getStatus()); + SearchCriteria sc = sb.create(); + sc.setParameters("id", hostId); + List statuses = customSearch(sc, null); + return CollectionUtils.isNotEmpty(statuses) && Status.Up.equals(statuses.get(0)); + } + + @Override + public List findHostIdsByZoneClusterResourceStateTypeAndHypervisorType(final Long zoneId, final Long clusterId, + final List resourceStates, final List types, + final List hypervisorTypes) { + GenericSearchBuilder sb = 
createSearchBuilder(Long.class); + sb.selectFields(sb.entity().getId()); + sb.and("zoneId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); + sb.and("clusterId", sb.entity().getClusterId(), SearchCriteria.Op.EQ); + sb.and("resourceState", sb.entity().getResourceState(), SearchCriteria.Op.IN); + sb.and("type", sb.entity().getType(), SearchCriteria.Op.IN); + if (CollectionUtils.isNotEmpty(hypervisorTypes)) { + sb.and().op(sb.entity().getHypervisorType(), SearchCriteria.Op.NULL); + sb.or("hypervisorTypes", sb.entity().getHypervisorType(), SearchCriteria.Op.IN); + sb.cp(); + } + sb.done(); + SearchCriteria sc = sb.create(); + if (zoneId != null) { + sc.setParameters("zoneId", zoneId); + } + if (clusterId != null) { + sc.setParameters("clusterId", clusterId); + } + if (CollectionUtils.isNotEmpty(hypervisorTypes)) { + sc.setParameters("hypervisorTypes", hypervisorTypes.toArray()); + } + sc.setParameters("resourceState", resourceStates.toArray()); + sc.setParameters("type", types.toArray()); + return customSearch(sc, null); + } + + @Override + public List listDistinctHypervisorTypes(final Long zoneId) { + GenericSearchBuilder sb = createSearchBuilder(HypervisorType.class); + sb.and("zoneId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); + sb.and("type", sb.entity().getType(), SearchCriteria.Op.EQ); + sb.select(null, Func.DISTINCT, sb.entity().getHypervisorType()); + sb.done(); + SearchCriteria sc = sb.create(); + if (zoneId != null) { + sc.setParameters("zoneId", zoneId); + } + sc.setParameters("type", Type.Routing); + return customSearch(sc, null); + } + + @Override + public List listByIds(List ids) { + if (CollectionUtils.isEmpty(ids)) { + return new ArrayList<>(); + } + SearchCriteria sc = IdsSearch.create(); + sc.setParameters("id", ids.toArray()); + return search(sc, null); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java index 
aa143838c34..5499d04e3a1 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java @@ -421,7 +421,7 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen public long countFreeIpsInVlan(long vlanDbId) { SearchCriteria sc = VlanDbIdSearchUnallocated.create(); sc.setParameters("vlanDbId", vlanDbId); - return listBy(sc).size(); + return getCount(sc); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java index fa448b026e4..0aae532eac5 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java @@ -415,8 +415,7 @@ public class NetworkDaoImpl extends GenericDaoBaseimplements Ne sc.setParameters("broadcastUri", broadcastURI); sc.setParameters("guestType", guestTypes); sc.setJoinParameters("persistent", "persistent", isPersistent); - List persistentNetworks = search(sc, null); - return persistentNetworks.size(); + return getCount(sc); } @Override diff --git a/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java b/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java index a37acdf6029..8229c3a62fc 100644 --- a/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java @@ -55,8 +55,7 @@ public class CommandExecLogDaoImpl extends GenericDaoBase sc = CommandSearch.create(); sc.setParameters("host_id", id); sc.setParameters("command_name", "CopyCommand"); - List copyCmds = customSearch(sc, null); - return copyCmds.size(); + return getCount(sc); } @Override diff --git a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java 
index 48e63d8e2b5..ceb5b0a4fc1 100644 --- a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java +++ b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java @@ -54,7 +54,7 @@ public interface ServiceOfferingDao extends GenericDao List listPublicByCpuAndMemory(Integer cpus, Integer memory); - List listByHostTag(String tag); - ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(long diskOfferingId, boolean includingRemoved); + + List listIdsByHostTag(String tag); } diff --git a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java index 706dcdc1b7b..803522fa6aa 100644 --- a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java @@ -34,6 +34,7 @@ import com.cloud.service.ServiceOfferingVO; import com.cloud.storage.Storage.ProvisioningType; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.exception.CloudRuntimeException; @@ -293,8 +294,9 @@ public class ServiceOfferingDaoImpl extends GenericDaoBase listByHostTag(String tag) { - SearchBuilder sb = createSearchBuilder(); + public List listIdsByHostTag(String tag) { + GenericSearchBuilder sb = createSearchBuilder(Long.class); + sb.selectFields(sb.entity().getId()); sb.and("tagNotNull", sb.entity().getHostTag(), SearchCriteria.Op.NNULL); sb.and().op("tagEq", sb.entity().getHostTag(), SearchCriteria.Op.EQ); sb.or("tagStartLike", sb.entity().getHostTag(), SearchCriteria.Op.LIKE); @@ -302,11 +304,12 @@ public class ServiceOfferingDaoImpl extends GenericDaoBase sc = sb.create(); + SearchCriteria sc = sb.create(); + sc.setParameters("tagEq", tag); sc.setParameters("tagStartLike", tag + ",%"); 
sc.setParameters("tagMidLike", "%," + tag + ",%"); sc.setParameters("tagEndLike", "%," + tag); - return listBy(sc); + return customSearch(sc, null); } } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java index 62ef5b7570d..639c2571541 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java @@ -34,7 +34,7 @@ public interface StoragePoolHostDao extends GenericDao List findHostsConnectedToPools(List poolIds); - List> getDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly); + boolean hasDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly); public void deletePrimaryRecordsForHost(long hostId); diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java index 987a42f410e..5a466af348c 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java @@ -55,11 +55,11 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase> getDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly) { - ArrayList> l = new ArrayList>(); + public boolean hasDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly) { + Long poolCount = 0L; String sql = sharedOnly ? 
SHARED_STORAGE_POOL_HOST_INFO : STORAGE_POOL_HOST_INFO; TransactionLegacy txn = TransactionLegacy.currentTxn(); - PreparedStatement pstmt = null; - try { - pstmt = txn.prepareAutoCloseStatement(sql); + try (PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql)) { pstmt.setLong(1, dcId); - ResultSet rs = pstmt.executeQuery(); while (rs.next()) { - l.add(new Pair(rs.getLong(1), rs.getInt(2))); + poolCount = rs.getLong(1); + if (poolCount > 0) { + return true; + } } } catch (SQLException e) { logger.debug("SQLException: ", e); } - return l; + return false; } /** diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java index 1c5a2cb4256..3ac514530ce 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java @@ -67,6 +67,8 @@ public interface VMTemplateDao extends GenericDao, StateDao< public List userIsoSearch(boolean listRemoved); + List listAllReadySystemVMTemplates(Long zoneId); + VMTemplateVO findSystemVMTemplate(long zoneId); VMTemplateVO findSystemVMReadyTemplate(long zoneId, HypervisorType hypervisorType); @@ -91,6 +93,5 @@ public interface VMTemplateDao extends GenericDao, StateDao< List listByIds(List ids); - List listByTemplateTag(String tag); - + List listIdsByTemplateTag(String tag); } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java index 4665f660251..7513848536b 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java @@ -344,19 +344,12 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem readySystemTemplateSearch = createSearchBuilder(); readySystemTemplateSearch.and("state", readySystemTemplateSearch.entity().getState(), 
SearchCriteria.Op.EQ); readySystemTemplateSearch.and("templateType", readySystemTemplateSearch.entity().getTemplateType(), SearchCriteria.Op.EQ); + readySystemTemplateSearch.and("hypervisorType", readySystemTemplateSearch.entity().getHypervisorType(), SearchCriteria.Op.IN); SearchBuilder templateDownloadSearch = _templateDataStoreDao.createSearchBuilder(); templateDownloadSearch.and("downloadState", templateDownloadSearch.entity().getDownloadState(), SearchCriteria.Op.IN); readySystemTemplateSearch.join("vmTemplateJoinTemplateStoreRef", templateDownloadSearch, templateDownloadSearch.entity().getTemplateId(), readySystemTemplateSearch.entity().getId(), JoinBuilder.JoinType.INNER); - SearchBuilder hostHyperSearch2 = _hostDao.createSearchBuilder(); - hostHyperSearch2.and("type", hostHyperSearch2.entity().getType(), SearchCriteria.Op.EQ); - hostHyperSearch2.and("zoneId", hostHyperSearch2.entity().getDataCenterId(), SearchCriteria.Op.EQ); - hostHyperSearch2.and("removed", hostHyperSearch2.entity().getRemoved(), SearchCriteria.Op.NULL); - hostHyperSearch2.groupBy(hostHyperSearch2.entity().getHypervisorType()); - - readySystemTemplateSearch.join("tmplHyper", hostHyperSearch2, hostHyperSearch2.entity().getHypervisorType(), readySystemTemplateSearch.entity() - .getHypervisorType(), JoinBuilder.JoinType.INNER); - hostHyperSearch2.done(); + readySystemTemplateSearch.groupBy(readySystemTemplateSearch.entity().getId()); readySystemTemplateSearch.done(); tmpltTypeHyperSearch2 = createSearchBuilder(); @@ -556,29 +549,35 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem } @Override - public VMTemplateVO findSystemVMReadyTemplate(long zoneId, HypervisorType hypervisorType) { + public List listAllReadySystemVMTemplates(Long zoneId) { + List availableHypervisors = _hostDao.listDistinctHypervisorTypes(zoneId); + if (CollectionUtils.isEmpty(availableHypervisors)) { + return Collections.emptyList(); + } SearchCriteria sc = readySystemTemplateSearch.create(); 
sc.setParameters("templateType", Storage.TemplateType.SYSTEM); sc.setParameters("state", VirtualMachineTemplate.State.Active); - sc.setJoinParameters("tmplHyper", "type", Host.Type.Routing); - sc.setJoinParameters("tmplHyper", "zoneId", zoneId); - sc.setJoinParameters("vmTemplateJoinTemplateStoreRef", "downloadState", new VMTemplateStorageResourceAssoc.Status[] {VMTemplateStorageResourceAssoc.Status.DOWNLOADED, VMTemplateStorageResourceAssoc.Status.BYPASSED}); - + sc.setParameters("hypervisorType", availableHypervisors.toArray()); + sc.setJoinParameters("vmTemplateJoinTemplateStoreRef", "downloadState", + List.of(VMTemplateStorageResourceAssoc.Status.DOWNLOADED, + VMTemplateStorageResourceAssoc.Status.BYPASSED).toArray()); // order by descending order of id - List tmplts = listBy(sc, new Filter(VMTemplateVO.class, "id", false, null, null)); - - if (tmplts.size() > 0) { - if (hypervisorType == HypervisorType.Any) { - return tmplts.get(0); - } - for (VMTemplateVO tmplt : tmplts) { - if (tmplt.getHypervisorType() == hypervisorType) { - return tmplt; - } - } + return listBy(sc, new Filter(VMTemplateVO.class, "id", false, null, null)); + } + @Override + public VMTemplateVO findSystemVMReadyTemplate(long zoneId, HypervisorType hypervisorType) { + List templates = listAllReadySystemVMTemplates(zoneId); + if (CollectionUtils.isEmpty(templates)) { + return null; } - return null; + if (hypervisorType == HypervisorType.Any) { + return templates.get(0); + } + return templates.stream() + .filter(t -> t.getHypervisorType() == hypervisorType) + .findFirst() + .orElse(null); } @Override @@ -687,13 +686,14 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem } @Override - public List listByTemplateTag(String tag) { - SearchBuilder sb = createSearchBuilder(); + public List listIdsByTemplateTag(String tag) { + GenericSearchBuilder sb = createSearchBuilder(Long.class); + sb.selectFields(sb.entity().getId()); sb.and("tag", sb.entity().getTemplateTag(), SearchCriteria.Op.EQ); 
sb.done(); - SearchCriteria sc = sb.create(); + SearchCriteria sc = sb.create(); sc.setParameters("tag", tag); - return listIncludingRemovedBy(sc); + return customSearchIncludingRemoved(sc, null); } @Override diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java index 0c4d707635a..750dbf2bee0 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java @@ -571,14 +571,6 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol } } - public static class SumCount { - public long sum; - public long count; - - public SumCount() { - } - } - @Override public List listVolumesToBeDestroyed() { SearchCriteria sc = AllFieldsSearch.create(); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index b197fb7c030..12049b6f240 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -870,7 +870,7 @@ public class SystemVmTemplateRegistration { public void doInTransactionWithoutResult(final TransactionStatus status) { Set hypervisorsListInUse = new HashSet(); try { - hypervisorsListInUse = clusterDao.getDistictAvailableHypervisorsAcrossClusters(); + hypervisorsListInUse = clusterDao.getDistinctAvailableHypervisorsAcrossClusters(); } catch (final Exception e) { LOGGER.error("updateSystemVmTemplates: Exception caught while getting hypervisor types from clusters: " + e.getMessage()); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java index 1c2c4b3c7ce..0b973d195de 100644 --- 
a/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java @@ -114,6 +114,17 @@ public class DatabaseAccessObject { } } + public void renameIndex(Connection conn, String tableName, String oldName, String newName) { + String stmt = String.format("ALTER TABLE %s RENAME INDEX %s TO %s", tableName, oldName, newName); + logger.debug("Statement: {}", stmt); + try (PreparedStatement pstmt = conn.prepareStatement(stmt)) { + pstmt.execute(); + logger.debug("Renamed index {} to {}", oldName, newName); + } catch (SQLException e) { + logger.warn("Unable to rename index {} to {}", oldName, newName, e); + } + } + protected void closePreparedStatement(PreparedStatement pstmt, String errorMessage) { try { if (pstmt != null) { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeUtils.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeUtils.java index 51e6ac7b9a1..2f90422adf8 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeUtils.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeUtils.java @@ -31,6 +31,12 @@ public class DbUpgradeUtils { } } + public static void renameIndexIfNeeded(Connection conn, String tableName, String oldName, String newName) { + if (!dao.indexExists(conn, tableName, oldName)) { + dao.renameIndex(conn, tableName, oldName, newName); + } + } + public static void addForeignKey(Connection conn, String tableName, String tableColumn, String foreignTableName, String foreignColumnName) { dao.addForeignKey(conn, tableName, tableColumn, foreignTableName, foreignColumnName); } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42000to42010.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42000to42010.java index 197ca1cb34c..6298e0e729a 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42000to42010.java +++ 
b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42000to42010.java @@ -53,6 +53,7 @@ public class Upgrade42000to42010 extends DbUpgradeAbstractImpl implements DbUpgr @Override public void performDataMigration(Connection conn) { + addIndexes(conn); } @Override @@ -80,4 +81,42 @@ public class Upgrade42000to42010 extends DbUpgradeAbstractImpl implements DbUpgr throw new CloudRuntimeException("Failed to find / register SystemVM template(s)"); } } + + private void addIndexes(Connection conn) { + DbUpgradeUtils.addIndexIfNeeded(conn, "host", "mgmt_server_id"); + DbUpgradeUtils.addIndexIfNeeded(conn, "host", "resource"); + DbUpgradeUtils.addIndexIfNeeded(conn, "host", "resource_state"); + DbUpgradeUtils.addIndexIfNeeded(conn, "host", "type"); + + DbUpgradeUtils.renameIndexIfNeeded(conn, "user_ip_address", "public_ip_address", "uk_public_ip_address"); + DbUpgradeUtils.addIndexIfNeeded(conn, "user_ip_address", "public_ip_address"); + DbUpgradeUtils.addIndexIfNeeded(conn, "user_ip_address", "data_center_id"); + DbUpgradeUtils.addIndexIfNeeded(conn, "user_ip_address", "vlan_db_id"); + DbUpgradeUtils.addIndexIfNeeded(conn, "user_ip_address", "removed"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "vlan", "vlan_type"); + DbUpgradeUtils.addIndexIfNeeded(conn, "vlan", "data_center_id"); + DbUpgradeUtils.addIndexIfNeeded(conn, "vlan", "removed"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "network_offering_details", "name"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "network_offering_details", "resource_id", "resource_type"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "service_offering", "cpu"); + DbUpgradeUtils.addIndexIfNeeded(conn, "service_offering", "speed"); + DbUpgradeUtils.addIndexIfNeeded(conn, "service_offering", "ram_size"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "op_host_planner_reservation", "resource_usage"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "storage_pool", "pool_type"); + DbUpgradeUtils.addIndexIfNeeded(conn, "storage_pool", "data_center_id", 
"status", "scope", "hypervisor"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "router_network_ref", "guest_type"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "domain_router", "role"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "async_job", "instance_type", "job_status"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "cluster", "managed_state"); + } } diff --git a/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java b/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java index d204f67dc93..e4fcbad6b02 100644 --- a/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java +++ b/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java @@ -33,11 +33,11 @@ import javax.persistence.Table; import javax.persistence.Transient; import org.apache.cloudstack.api.InternalIdentity; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; +import org.apache.commons.lang3.StringUtils; import com.cloud.utils.db.Encrypt; import com.cloud.utils.db.GenericDao; -import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; -import org.apache.commons.lang3.StringUtils; @Entity @Table(name = "user") @@ -131,12 +131,6 @@ public class UserAccountVO implements UserAccount, InternalIdentity { public UserAccountVO() { } - @Override - public String toString() { - return String.format("UserAccount %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields - (this, "id", "uuid", "username", "accountName")); - } - @Override public long getId() { return id; @@ -379,4 +373,10 @@ public class UserAccountVO implements UserAccount, InternalIdentity { public void setDetails(Map details) { this.details = details; } + + @Override + public String toString() { + return String.format("UserAccount %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields + (this, "id", "uuid", "username", "accountName")); + } } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDao.java 
b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDao.java index cb19748fda4..af32163b4c7 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDao.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDao.java @@ -45,7 +45,7 @@ public interface ConsoleProxyDao extends GenericDao { public List getDatacenterSessionLoadMatrix(); - public List> getDatacenterStoragePoolHostInfo(long dcId, boolean countAllPoolTypes); + public boolean hasDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly); public List> getProxyLoadMatrix(); diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDaoImpl.java index ef94a4d9f72..bc79194a10f 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDaoImpl.java @@ -23,7 +23,6 @@ import java.util.ArrayList; import java.util.Date; import java.util.List; - import org.springframework.stereotype.Component; import com.cloud.info.ConsoleProxyLoadInfo; @@ -76,11 +75,11 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase im private static final String GET_PROXY_ACTIVE_LOAD = "SELECT active_session AS count" + " FROM console_proxy" + " WHERE id=?"; - private static final String STORAGE_POOL_HOST_INFO = "SELECT p.data_center_id, count(ph.host_id) " + " FROM storage_pool p, storage_pool_host_ref ph " - + " WHERE p.id = ph.pool_id AND p.data_center_id = ? " + " GROUP by p.data_center_id"; + protected static final String STORAGE_POOL_HOST_INFO = "SELECT (SELECT id FROM storage_pool_host_ref ph WHERE " + + "ph.pool_id=p.id limit 1) AS sphr FROM storage_pool p WHERE p.data_center_id = ?"; - private static final String SHARED_STORAGE_POOL_HOST_INFO = "SELECT p.data_center_id, count(ph.host_id) " + " FROM storage_pool p, storage_pool_host_ref ph " - + " WHERE p.pool_type <> 'LVM' AND p.id = ph.pool_id AND p.data_center_id = ? 
" + " GROUP by p.data_center_id"; + protected static final String SHARED_STORAGE_POOL_HOST_INFO = "SELECT (SELECT id FROM storage_pool_host_ref ph " + + "WHERE ph.pool_id=p.id limit 1) AS sphr FROM storage_pool p WHERE p.data_center_id = ? AND p.pool_type NOT IN ('LVM', 'Filesystem')"; protected SearchBuilder DataCenterStatusSearch; protected SearchBuilder StateSearch; @@ -219,28 +218,23 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase im } @Override - public List> getDatacenterStoragePoolHostInfo(long dcId, boolean countAllPoolTypes) { - ArrayList> l = new ArrayList>(); - + public boolean hasDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly) { + Long poolCount = 0L; + String sql = sharedOnly ? SHARED_STORAGE_POOL_HOST_INFO : STORAGE_POOL_HOST_INFO; TransactionLegacy txn = TransactionLegacy.currentTxn(); - ; - PreparedStatement pstmt = null; - try { - if (countAllPoolTypes) { - pstmt = txn.prepareAutoCloseStatement(STORAGE_POOL_HOST_INFO); - } else { - pstmt = txn.prepareAutoCloseStatement(SHARED_STORAGE_POOL_HOST_INFO); - } + try (PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql)) { pstmt.setLong(1, dcId); - ResultSet rs = pstmt.executeQuery(); while (rs.next()) { - l.add(new Pair(rs.getLong(1), rs.getInt(2))); + poolCount = rs.getLong(1); + if (poolCount > 0) { + return true; + } } } catch (SQLException e) { logger.debug("Caught SQLException: ", e); } - return l; + return false; } @Override diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/NicIpAliasDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/NicIpAliasDaoImpl.java index 887b3d73087..44866c0a358 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/NicIpAliasDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/NicIpAliasDaoImpl.java @@ -170,8 +170,7 @@ public class NicIpAliasDaoImpl extends GenericDaoBase implem public Integer countAliasIps(long id) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("instanceId", id); - List list 
= listBy(sc); - return list.size(); + return getCount(sc); } @Override diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java index 52bc5aac7e2..823642d8c3d 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java @@ -16,6 +16,7 @@ // under the License. package com.cloud.vm.dao; +import java.util.Collection; import java.util.Date; import java.util.HashMap; import java.util.List; @@ -81,7 +82,7 @@ public interface VMInstanceDao extends GenericDao, StateDao< List listByHostAndState(long hostId, State... states); - List listByTypes(VirtualMachine.Type... types); + int countByTypes(VirtualMachine.Type... types); VMInstanceVO findByIdTypes(long id, VirtualMachine.Type... types); @@ -144,21 +145,28 @@ public interface VMInstanceDao extends GenericDao, StateDao< */ List listDistinctHostNames(long networkId, VirtualMachine.Type... types); + List findByHostInStatesExcluding(Long hostId, Collection excludingIds, State... states); + List findByHostInStates(Long hostId, State... 
states); List listStartingWithNoHostId(); boolean updatePowerState(long instanceId, long powerHostId, VirtualMachine.PowerState powerState, Date wisdomEra); + Map updatePowerState(Map instancePowerStates, + long powerHostId, Date wisdomEra); + void resetVmPowerStateTracking(long instanceId); + void resetVmPowerStateTracking(List instanceId); + void resetHostPowerStateTracking(long hostId); HashMap countVgpuVMs(Long dcId, Long podId, Long clusterId); VMInstanceVO findVMByHostNameInZone(String hostName, long zoneId); - boolean isPowerStateUpToDate(long instanceId); + boolean isPowerStateUpToDate(VMInstanceVO instance); List listNonMigratingVmsByHostEqualsLastHost(long hostId); @@ -170,4 +178,13 @@ public interface VMInstanceDao extends GenericDao, StateDao< List skippedVmIds); Pair, Integer> listByVmsNotInClusterUsingPool(long clusterId, long poolId); + + List listIdServiceOfferingForUpVmsByHostId(Long hostId); + + List listIdServiceOfferingForVmsMigratingFromHost(Long hostId); + + Map getNameIdMapForVmInstanceNames(Collection names); + + Map getNameIdMapForVmIds(Collection ids); + } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java index 0e87e6bcb7d..ef10af63bae 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -20,6 +20,7 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; +import java.util.Collection; import java.util.Date; import java.util.HashMap; import java.util.List; @@ -75,6 +76,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem protected SearchBuilder LHVMClusterSearch; protected SearchBuilder IdStatesSearch; protected SearchBuilder AllFieldsSearch; + protected SearchBuilder IdServiceOfferingIdSelectSearch; protected SearchBuilder ZoneTemplateNonExpungedSearch; 
protected SearchBuilder TemplateNonExpungedSearch; protected SearchBuilder NameLikeSearch; @@ -101,6 +103,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem protected SearchBuilder BackupSearch; protected SearchBuilder LastHostAndStatesSearch; protected SearchBuilder VmsNotInClusterUsingPool; + protected SearchBuilder IdsPowerStateSelectSearch; @Inject ResourceTagDao tagsDao; @@ -175,6 +178,14 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem AllFieldsSearch.and("account", AllFieldsSearch.entity().getAccountId(), Op.EQ); AllFieldsSearch.done(); + IdServiceOfferingIdSelectSearch = createSearchBuilder(); + IdServiceOfferingIdSelectSearch.and("host", IdServiceOfferingIdSelectSearch.entity().getHostId(), Op.EQ); + IdServiceOfferingIdSelectSearch.and("lastHost", IdServiceOfferingIdSelectSearch.entity().getLastHostId(), Op.EQ); + IdServiceOfferingIdSelectSearch.and("state", IdServiceOfferingIdSelectSearch.entity().getState(), Op.EQ); + IdServiceOfferingIdSelectSearch.and("states", IdServiceOfferingIdSelectSearch.entity().getState(), Op.IN); + IdServiceOfferingIdSelectSearch.selectFields(IdServiceOfferingIdSelectSearch.entity().getId(), IdServiceOfferingIdSelectSearch.entity().getServiceOfferingId()); + IdServiceOfferingIdSelectSearch.done(); + ZoneTemplateNonExpungedSearch = createSearchBuilder(); ZoneTemplateNonExpungedSearch.and("zone", ZoneTemplateNonExpungedSearch.entity().getDataCenterId(), Op.EQ); ZoneTemplateNonExpungedSearch.and("template", ZoneTemplateNonExpungedSearch.entity().getTemplateId(), Op.EQ); @@ -274,6 +285,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem HostAndStateSearch = createSearchBuilder(); HostAndStateSearch.and("host", HostAndStateSearch.entity().getHostId(), Op.EQ); HostAndStateSearch.and("states", HostAndStateSearch.entity().getState(), Op.IN); + HostAndStateSearch.and("idsNotIn", HostAndStateSearch.entity().getId(), Op.NIN); HostAndStateSearch.done(); StartingWithNoHostSearch = 
createSearchBuilder(); @@ -323,6 +335,15 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem VmsNotInClusterUsingPool.join("hostSearch2", hostSearch2, hostSearch2.entity().getId(), VmsNotInClusterUsingPool.entity().getHostId(), JoinType.INNER); VmsNotInClusterUsingPool.and("vmStates", VmsNotInClusterUsingPool.entity().getState(), Op.IN); VmsNotInClusterUsingPool.done(); + + IdsPowerStateSelectSearch = createSearchBuilder(); + IdsPowerStateSelectSearch.and("id", IdsPowerStateSelectSearch.entity().getId(), Op.IN); + IdsPowerStateSelectSearch.selectFields(IdsPowerStateSelectSearch.entity().getId(), + IdsPowerStateSelectSearch.entity().getPowerHostId(), + IdsPowerStateSelectSearch.entity().getPowerState(), + IdsPowerStateSelectSearch.entity().getPowerStateUpdateCount(), + IdsPowerStateSelectSearch.entity().getPowerStateUpdateTime()); + IdsPowerStateSelectSearch.done(); } @Override @@ -458,10 +479,10 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem } @Override - public List listByTypes(Type... types) { + public int countByTypes(Type... types) { SearchCriteria sc = TypesSearch.create(); sc.setParameters("types", (Object[])types); - return listBy(sc); + return getCount(sc); } @Override @@ -897,6 +918,17 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem return result; } + @Override + public List findByHostInStatesExcluding(Long hostId, Collection excludingIds, State... states) { + SearchCriteria sc = HostAndStateSearch.create(); + sc.setParameters("host", hostId); + if (excludingIds != null && !excludingIds.isEmpty()) { + sc.setParameters("idsNotIn", excludingIds.toArray()); + } + sc.setParameters("states", (Object[])states); + return listBy(sc); + } + @Override public List findByHostInStates(Long hostId, State... 
states) { SearchCriteria sc = HostAndStateSearch.create(); @@ -912,42 +944,109 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem return listBy(sc); } - @Override - public boolean updatePowerState(final long instanceId, final long powerHostId, final VirtualMachine.PowerState powerState, Date wisdomEra) { - return Transaction.execute(new TransactionCallback<>() { - @Override - public Boolean doInTransaction(TransactionStatus status) { - boolean needToUpdate = false; - VMInstanceVO instance = findById(instanceId); - if (instance != null - && (null == instance.getPowerStateUpdateTime() - || instance.getPowerStateUpdateTime().before(wisdomEra))) { - Long savedPowerHostId = instance.getPowerHostId(); - if (instance.getPowerState() != powerState - || savedPowerHostId == null - || savedPowerHostId != powerHostId - || !isPowerStateInSyncWithInstanceState(powerState, powerHostId, instance)) { - instance.setPowerState(powerState); - instance.setPowerHostId(powerHostId); - instance.setPowerStateUpdateCount(1); - instance.setPowerStateUpdateTime(DateUtil.currentGMTTime()); - needToUpdate = true; - update(instanceId, instance); - } else { - // to reduce DB updates, consecutive same state update for more than 3 times - if (instance.getPowerStateUpdateCount() < MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT) { - instance.setPowerStateUpdateCount(instance.getPowerStateUpdateCount() + 1); - instance.setPowerStateUpdateTime(DateUtil.currentGMTTime()); - needToUpdate = true; - update(instanceId, instance); - } - } - } - return needToUpdate; + protected List listSelectPowerStateByIds(final List ids) { + if (CollectionUtils.isEmpty(ids)) { + return new ArrayList<>(); + } + SearchCriteria sc = IdsPowerStateSelectSearch.create(); + sc.setParameters("id", ids.toArray()); + return customSearch(sc, null); + } + + protected Integer getPowerUpdateCount(final VMInstanceVO instance, final long powerHostId, + final VirtualMachine.PowerState powerState, Date wisdomEra) { + if 
(instance.getPowerStateUpdateTime() == null || instance.getPowerStateUpdateTime().before(wisdomEra)) { + Long savedPowerHostId = instance.getPowerHostId(); + boolean isStateMismatch = instance.getPowerState() != powerState + || savedPowerHostId == null + || !savedPowerHostId.equals(powerHostId) + || !isPowerStateInSyncWithInstanceState(powerState, powerHostId, instance); + if (isStateMismatch) { + return 1; + } else if (instance.getPowerStateUpdateCount() < MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT) { + return instance.getPowerStateUpdateCount() + 1; } + } + return null; + } + + @Override + public boolean updatePowerState(final long instanceId, final long powerHostId, + final VirtualMachine.PowerState powerState, Date wisdomEra) { + return Transaction.execute((TransactionCallback) status -> { + VMInstanceVO instance = findById(instanceId); + if (instance == null) { + return false; + } + // Check if we need to update based on powerStateUpdateTime + if (instance.getPowerStateUpdateTime() == null || instance.getPowerStateUpdateTime().before(wisdomEra)) { + Long savedPowerHostId = instance.getPowerHostId(); + boolean isStateMismatch = instance.getPowerState() != powerState + || savedPowerHostId == null + || !savedPowerHostId.equals(powerHostId) + || !isPowerStateInSyncWithInstanceState(powerState, powerHostId, instance); + + if (isStateMismatch) { + instance.setPowerState(powerState); + instance.setPowerHostId(powerHostId); + instance.setPowerStateUpdateCount(1); + } else if (instance.getPowerStateUpdateCount() < MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT) { + instance.setPowerStateUpdateCount(instance.getPowerStateUpdateCount() + 1); + } else { + // No need to update if power state is already in sync and count exceeded + return false; + } + instance.setPowerStateUpdateTime(DateUtil.currentGMTTime()); + update(instanceId, instance); + return true; // Return true since an update occurred + } + return false; }); } + @Override + public Map updatePowerState( + final Map 
instancePowerStates, long powerHostId, Date wisdomEra) { + Map notUpdated = new HashMap<>(); + List instances = listSelectPowerStateByIds(new ArrayList<>(instancePowerStates.keySet())); + Map updateCounts = new HashMap<>(); + for (VMInstanceVO instance : instances) { + VirtualMachine.PowerState powerState = instancePowerStates.get(instance.getId()); + Integer count = getPowerUpdateCount(instance, powerHostId, powerState, wisdomEra); + if (count != null) { + updateCounts.put(instance.getId(), count); + } else { + notUpdated.put(instance.getId(), powerState); + } + } + if (updateCounts.isEmpty()) { + return notUpdated; + } + StringBuilder sql = new StringBuilder("UPDATE `cloud`.`vm_instance` SET " + + "`power_host` = ?, `power_state_update_time` = now(), `power_state` = CASE "); + updateCounts.keySet().forEach(key -> { + sql.append("WHEN id = ").append(key).append(" THEN '").append(instancePowerStates.get(key)).append("' "); + }); + sql.append("END, `power_state_update_count` = CASE "); + StringBuilder idList = new StringBuilder(); + updateCounts.forEach((key, value) -> { + sql.append("WHEN `id` = ").append(key).append(" THEN ").append(value).append(" "); + idList.append(key).append(","); + }); + idList.setLength(idList.length() - 1); + sql.append("END WHERE `id` IN (").append(idList).append(")"); + TransactionLegacy txn = TransactionLegacy.currentTxn(); + try (PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql.toString())) { + pstmt.setLong(1, powerHostId); + pstmt.executeUpdate(); + } catch (SQLException e) { + logger.error("Unable to execute update power states SQL from VMs {} due to: {}", + idList, e.getMessage(), e); + return instancePowerStates; + } + return notUpdated; + } + private boolean isPowerStateInSyncWithInstanceState(final VirtualMachine.PowerState powerState, final long powerHostId, final VMInstanceVO instance) { State instanceState = instance.getState(); if ((powerState == VirtualMachine.PowerState.PowerOff && instanceState == 
State.Running) @@ -962,11 +1061,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem } @Override - public boolean isPowerStateUpToDate(final long instanceId) { - VMInstanceVO instance = findById(instanceId); - if(instance == null) { - throw new CloudRuntimeException("checking power state update count on non existing instance " + instanceId); - } + public boolean isPowerStateUpToDate(final VMInstanceVO instance) { return instance.getPowerStateUpdateCount() < MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT; } @@ -985,6 +1080,25 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem }); } + @Override + public void resetVmPowerStateTracking(List instanceIds) { + if (CollectionUtils.isEmpty(instanceIds)) { + return; + } + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + SearchCriteria sc = IdsPowerStateSelectSearch.create(); + sc.setParameters("id", instanceIds.toArray()); + VMInstanceVO vm = createForUpdate(); + vm.setPowerStateUpdateCount(0); + vm.setPowerStateUpdateTime(DateUtil.currentGMTTime()); + UpdateBuilder ub = getUpdateBuilder(vm); + update(ub, sc, null); + } + }); + } + @Override @DB public void resetHostPowerStateTracking(final long hostId) { Transaction.execute(new TransactionCallbackNoReturn() { @@ -1060,6 +1174,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem return searchIncludingRemoved(sc, filter, null, false); } + @Override public Pair, Integer> listByVmsNotInClusterUsingPool(long clusterId, long poolId) { SearchCriteria sc = VmsNotInClusterUsingPool.create(); sc.setParameters("vmStates", State.Starting, State.Running, State.Stopping, State.Migrating, State.Restoring); @@ -1069,4 +1184,44 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem List uniqueVms = vms.stream().distinct().collect(Collectors.toList()); return new Pair<>(uniqueVms, uniqueVms.size()); } + + @Override + public List 
listIdServiceOfferingForUpVmsByHostId(Long hostId) { + SearchCriteria sc = IdServiceOfferingIdSelectSearch.create(); + sc.setParameters("host", hostId); + sc.setParameters("states", new Object[] {State.Starting, State.Running, State.Stopping, State.Migrating}); + return customSearch(sc, null); + } + + @Override + public List listIdServiceOfferingForVmsMigratingFromHost(Long hostId) { + SearchCriteria sc = IdServiceOfferingIdSelectSearch.create(); + sc.setParameters("lastHost", hostId); + sc.setParameters("state", State.Migrating); + return customSearch(sc, null); + } + + @Override + public Map getNameIdMapForVmInstanceNames(Collection names) { + SearchBuilder sb = createSearchBuilder(); + sb.and("name", sb.entity().getInstanceName(), Op.IN); + sb.selectFields(sb.entity().getId(), sb.entity().getInstanceName()); + SearchCriteria sc = sb.create(); + sc.setParameters("name", names.toArray()); + List vms = customSearch(sc, null); + return vms.stream() + .collect(Collectors.toMap(VMInstanceVO::getInstanceName, VMInstanceVO::getId)); + } + + @Override + public Map getNameIdMapForVmIds(Collection ids) { + SearchBuilder sb = createSearchBuilder(); + sb.and("id", sb.entity().getId(), Op.IN); + sb.selectFields(sb.entity().getId(), sb.entity().getInstanceName()); + SearchCriteria sc = sb.create(); + sc.setParameters("id", ids.toArray()); + List vms = customSearch(sc, null); + return vms.stream() + .collect(Collectors.toMap(VMInstanceVO::getInstanceName, VMInstanceVO::getId)); + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/network/dao/NetworkPermissionDao.java b/engine/schema/src/main/java/org/apache/cloudstack/network/dao/NetworkPermissionDao.java index 1c8d1cf48ff..e8b6322baee 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/network/dao/NetworkPermissionDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/network/dao/NetworkPermissionDao.java @@ -40,6 +40,13 @@ public interface NetworkPermissionDao extends GenericDao 
NetworkAndAccountSearch; private SearchBuilder NetworkIdSearch; + private SearchBuilder accountSearch; private GenericSearchBuilder FindNetworkIdsByAccount; protected NetworkPermissionDaoImpl() { @@ -45,6 +46,10 @@ public class NetworkPermissionDaoImpl extends GenericDaoBase sc = accountSearch.create(); + sc.setParameters("accountId", accountId); + int networkPermissionRemoved = expunge(sc); + if (networkPermissionRemoved > 0) { + logger.debug(String.format("Removed [%s] network permission(s) for the account with Id [%s]", networkPermissionRemoved, accountId)); + } + } + @Override public NetworkPermissionVO findByNetworkAndAccount(long networkId, long accountId) { SearchCriteria sc = NetworkAndAccountSearch.create(); diff --git a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java index 8f3d264da98..6d0d9378c7c 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java @@ -88,6 +88,8 @@ public interface ResourceDetailsDao extends GenericDao public Map listDetailsKeyPairs(long resourceId); + Map listDetailsKeyPairs(long resourceId, List keys); + public Map listDetailsKeyPairs(long resourceId, boolean forDisplay); Map listDetailsVisibility(long resourceId); diff --git a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java index 4205a7823e4..f2e156f225a 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.resourcedetail; import java.util.HashMap; import java.util.List; import 
java.util.Map; +import java.util.stream.Collectors; import org.apache.cloudstack.api.ResourceDetail; import org.apache.commons.collections.CollectionUtils; @@ -91,6 +92,20 @@ public abstract class ResourceDetailsDaoBase extends G return details; } + @Override + public Map listDetailsKeyPairs(long resourceId, List keys) { + SearchBuilder sb = createSearchBuilder(); + sb.and("resourceId", sb.entity().getResourceId(), SearchCriteria.Op.EQ); + sb.and("name", sb.entity().getName(), SearchCriteria.Op.IN); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("resourceId", resourceId); + sc.setParameters("name", keys.toArray()); + + List results = search(sc, null); + return results.stream().collect(Collectors.toMap(R::getName, R::getValue)); + } + public Map listDetailsVisibility(long resourceId) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("resourceId", resourceId); diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java index 1658fe0a537..07b0b8b517c 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java @@ -28,20 +28,20 @@ import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.storage.Storage; -import com.cloud.utils.Pair; -import com.cloud.utils.db.Filter; import org.apache.commons.collections.CollectionUtils; import com.cloud.host.Status; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.ScopeType; +import com.cloud.storage.Storage; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.StoragePoolStatus; import com.cloud.storage.StoragePoolTagVO; import com.cloud.storage.dao.StoragePoolHostDao; import 
com.cloud.storage.dao.StoragePoolTagsDao; +import com.cloud.utils.Pair; import com.cloud.utils.db.DB; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.JoinBuilder; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_update_api_permission.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_update_api_permission.sql new file mode 100644 index 00000000000..c53e0067061 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_update_api_permission.sql @@ -0,0 +1,52 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. 
+ +DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`; + +CREATE PROCEDURE `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION` ( + IN role VARCHAR(255), + IN rule VARCHAR(255), + IN permission VARCHAR(255) +) +BEGIN + DECLARE role_id BIGINT(20) UNSIGNED +; DECLARE max_sort_order BIGINT(20) UNSIGNED + +; SELECT `r`.`id` INTO role_id + FROM `cloud`.`roles` `r` + WHERE `r`.`name` = role + AND `r`.`is_default` = 1 + +; SELECT MAX(`rp`.`sort_order`) INTO max_sort_order + FROM `cloud`.`role_permissions` `rp` + WHERE `rp`.`role_id` = role_id + +; IF NOT EXISTS ( + SELECT * FROM `cloud`.`role_permissions` `rp` + WHERE `rp`.`role_id` = role_id + AND `rp`.`rule` = rule + ) THEN + UPDATE `cloud`.`role_permissions` `rp` + SET `rp`.`sort_order` = max_sort_order + 1 + WHERE `rp`.`sort_order` = max_sort_order + AND `rp`.`role_id` = role_id + +; INSERT INTO `cloud`.`role_permissions` + (uuid, role_id, rule, permission, sort_order) + VALUES (uuid(), role_id, rule, permission, max_sort_order) +; END IF +;END; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41910to41920.sql b/engine/schema/src/main/resources/META-INF/db/schema-41910to41920.sql index 2ce8ea99bd1..12ead739d84 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41910to41920.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41910to41920.sql @@ -21,3 +21,25 @@ -- Add last_id to the volumes table CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.volumes', 'last_id', 'bigint(20) unsigned DEFAULT NULL'); + +-- Grant access to 2FA APIs for the "Read-Only User - Default" role + +CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Read-Only User - Default', 'setupUserTwoFactorAuthentication', 'ALLOW'); +CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Read-Only User - Default', 'validateUserTwoFactorAuthenticationCode', 'ALLOW'); +CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Read-Only User - Default', 'listUserTwoFactorAuthenticatorProviders', 'ALLOW'); + +-- Grant access to 2FA APIs 
for the "Support User - Default" role + +CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Support User - Default', 'setupUserTwoFactorAuthentication', 'ALLOW'); +CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Support User - Default', 'validateUserTwoFactorAuthenticationCode', 'ALLOW'); +CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Support User - Default', 'listUserTwoFactorAuthenticatorProviders', 'ALLOW'); + +-- Grant access to 2FA APIs for the "Read-Only Admin - Default" role + +CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Read-Only Admin - Default', 'setupUserTwoFactorAuthentication', 'ALLOW'); +CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Read-Only Admin - Default', 'validateUserTwoFactorAuthenticationCode', 'ALLOW'); + +-- Grant access to 2FA APIs for the "Support Admin - Default" role + +CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Support Admin - Default', 'setupUserTwoFactorAuthentication', 'ALLOW'); +CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Support Admin - Default', 'validateUserTwoFactorAuthenticationCode', 'ALLOW'); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42000to42010-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-42000to42010-cleanup.sql index d187b6fa043..a00d50a7e10 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-42000to42010-cleanup.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-42000to42010-cleanup.sql @@ -18,3 +18,6 @@ --; -- Schema upgrade cleanup from 4.20.0.0 to 4.20.1.0 --; + +-- Delete `project_account` entries for users that were removed +DELETE FROM `cloud`.`project_account` WHERE `user_id` IN (SELECT `id` FROM `cloud`.`user` WHERE `removed`); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42000to42010.sql b/engine/schema/src/main/resources/META-INF/db/schema-42000to42010.sql index 8b70cce3404..92e0dbb5b2a 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-42000to42010.sql +++ 
b/engine/schema/src/main/resources/META-INF/db/schema-42000to42010.sql @@ -22,6 +22,7 @@ -- Add column api_key_access to user and account tables CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.user', 'api_key_access', 'boolean DEFAULT NULL COMMENT "is api key access allowed for the user" AFTER `secret_key`'); CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.account', 'api_key_access', 'boolean DEFAULT NULL COMMENT "is api key access allowed for the account" '); +CALL `cloud_usage`.`IDEMPOTENT_ADD_COLUMN`('cloud_usage.account', 'api_key_access', 'boolean DEFAULT NULL COMMENT "is api key access allowed for the account" '); -- Modify index for mshost_peer DELETE FROM `cloud`.`mshost_peer`; @@ -35,3 +36,6 @@ CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.volumes', 'last_id', 'bigint(20) uns -- Add used_iops column to support IOPS data in storage stats CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.storage_pool', 'used_iops', 'bigint unsigned DEFAULT NULL COMMENT "IOPS currently in use for this storage pool" '); + +-- Add reason column for op_ha_work +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.op_ha_work', 'reason', 'varchar(32) DEFAULT NULL COMMENT "Reason for the HA work"'); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql b/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql index 92c2432344a..c357b2815d1 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql @@ -32,6 +32,9 @@ FROM `cloud`.`role_permissions` rp WHERE rp.rule = 'quotaStatement' AND NOT EXISTS(SELECT 1 FROM cloud.role_permissions rp_ WHERE rp.role_id = rp_.role_id AND rp_.rule = 'quotaCreditsList'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.host', 'last_mgmt_server_id', 'bigint unsigned DEFAULT NULL COMMENT "last management server this host is connected to" AFTER `mgmt_server_id`'); + + ----------------------------------------------------------- -- CKS 
Enhancements: ----------------------------------------------------------- diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.network_offering_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.network_offering_view.sql index b6abaabcd48..640b2397a46 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.network_offering_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.network_offering_view.sql @@ -76,13 +76,9 @@ SELECT FROM `cloud`.`network_offerings` LEFT JOIN - `cloud`.`network_offering_details` AS `domain_details` ON `domain_details`.`network_offering_id` = `network_offerings`.`id` AND `domain_details`.`name`='domainid' + `cloud`.`domain` AS `domain` ON `domain`.id IN (SELECT value from `network_offering_details` where `name` = 'domainid' and `network_offering_id` = `network_offerings`.`id`) LEFT JOIN - `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`) - LEFT JOIN - `cloud`.`network_offering_details` AS `zone_details` ON `zone_details`.`network_offering_id` = `network_offerings`.`id` AND `zone_details`.`name`='zoneid' - LEFT JOIN - `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`) + `cloud`.`data_center` AS `zone` ON `zone`.`id` IN (SELECT value from `network_offering_details` where `name` = 'zoneid' and `network_offering_id` = `network_offerings`.`id`) LEFT JOIN `cloud`.`network_offering_details` AS `offering_details` ON `offering_details`.`network_offering_id` = `network_offerings`.`id` AND `offering_details`.`name`='internetProtocol' GROUP BY diff --git a/engine/schema/src/test/java/com/cloud/capacity/dao/CapacityDaoImplTest.java b/engine/schema/src/test/java/com/cloud/capacity/dao/CapacityDaoImplTest.java new file mode 100644 index 00000000000..76c1092546a --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/capacity/dao/CapacityDaoImplTest.java @@ -0,0 +1,99 @@ +// Licensed to the Apache Software Foundation 
(ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.capacity.dao; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.capacity.CapacityVO; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class CapacityDaoImplTest { + @Spy + @InjectMocks + CapacityDaoImpl capacityDao = new CapacityDaoImpl(); + + private SearchBuilder searchBuilder; + private SearchCriteria searchCriteria; + + @Before + public void setUp() { + searchBuilder = mock(SearchBuilder.class); + CapacityVO capacityVO = mock(CapacityVO.class); + 
when(searchBuilder.entity()).thenReturn(capacityVO); + searchCriteria = mock(SearchCriteria.class); + doReturn(searchBuilder).when(capacityDao).createSearchBuilder(); + when(searchBuilder.create()).thenReturn(searchCriteria); + } + + @Test + public void testListByHostIdTypes() { + // Prepare inputs + Long hostId = 1L; + List capacityTypes = Arrays.asList((short)1, (short)2); + CapacityVO capacity1 = new CapacityVO(); + CapacityVO capacity2 = new CapacityVO(); + List mockResult = Arrays.asList(capacity1, capacity2); + doReturn(mockResult).when(capacityDao).listBy(any(SearchCriteria.class)); + List result = capacityDao.listByHostIdTypes(hostId, capacityTypes); + verify(searchBuilder).and(eq("hostId"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchBuilder).and(eq("type"), any(), eq(SearchCriteria.Op.IN)); + verify(searchBuilder).done(); + verify(searchCriteria).setParameters("hostId", hostId); + verify(searchCriteria).setParameters("type", capacityTypes.toArray()); + verify(capacityDao).listBy(searchCriteria); + assertEquals(2, result.size()); + assertSame(capacity1, result.get(0)); + assertSame(capacity2, result.get(1)); + } + + @Test + public void testListByHostIdTypesEmptyResult() { + Long hostId = 1L; + List capacityTypes = Arrays.asList((short)1, (short)2); + doReturn(Collections.emptyList()).when(capacityDao).listBy(any(SearchCriteria.class)); + List result = capacityDao.listByHostIdTypes(hostId, capacityTypes); + verify(searchBuilder).and(Mockito.eq("hostId"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchBuilder).and(eq("type"), any(), eq(SearchCriteria.Op.IN)); + verify(searchBuilder).done(); + verify(searchCriteria).setParameters("hostId", hostId); + verify(searchCriteria).setParameters("type", capacityTypes.toArray()); + verify(capacityDao).listBy(searchCriteria); + assertTrue(result.isEmpty()); + } +} diff --git a/engine/schema/src/test/java/com/cloud/dc/dao/ClusterDaoImplTest.java 
b/engine/schema/src/test/java/com/cloud/dc/dao/ClusterDaoImplTest.java new file mode 100644 index 00000000000..a513809be05 --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/dc/dao/ClusterDaoImplTest.java @@ -0,0 +1,78 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.dc.dao; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.isNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.dc.ClusterVO; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.SearchBuilder; + +@RunWith(MockitoJUnitRunner.class) +public class ClusterDaoImplTest { + @Spy + @InjectMocks + ClusterDaoImpl clusterDao = new ClusterDaoImpl(); + + private GenericSearchBuilder genericSearchBuilder; + + @Before + public void setUp() { + genericSearchBuilder = mock(SearchBuilder.class); + ClusterVO entityVO = mock(ClusterVO.class); + when(genericSearchBuilder.entity()).thenReturn(entityVO); + doReturn(genericSearchBuilder).when(clusterDao).createSearchBuilder(Long.class); + } + + @Test + public void testListAllIds() { + List mockIds = Arrays.asList(1L, 2L, 3L); + doReturn(mockIds).when(clusterDao).customSearch(any(), isNull()); + List result = clusterDao.listAllIds(); + verify(clusterDao).customSearch(genericSearchBuilder.create(), null); + assertEquals(3, result.size()); + assertEquals(Long.valueOf(1L), result.get(0)); + assertEquals(Long.valueOf(2L), result.get(1)); + assertEquals(Long.valueOf(3L), result.get(2)); + } + + @Test + public void testListAllIdsEmptyResult() { + doReturn(Collections.emptyList()).when(clusterDao).customSearch(any(), isNull()); + List result = clusterDao.listAllIds(); + verify(clusterDao).customSearch(genericSearchBuilder.create(), null); + assertTrue(result.isEmpty()); + } +} 
diff --git a/engine/schema/src/test/java/com/cloud/host/dao/HostDaoImplTest.java b/engine/schema/src/test/java/com/cloud/host/dao/HostDaoImplTest.java new file mode 100644 index 00000000000..81163321c6b --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/host/dao/HostDaoImplTest.java @@ -0,0 +1,184 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.host.dao; + +import java.util.List; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.resource.ResourceState; +import com.cloud.utils.Pair; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class HostDaoImplTest { + + @Spy + HostDaoImpl hostDao = new HostDaoImpl(); + + @Mock + private SearchBuilder mockSearchBuilder; + @Mock + private SearchCriteria mockSearchCriteria; + + @Test + public void testCountUpAndEnabledHostsInZone() { + long testZoneId = 100L; + hostDao.HostTypeCountSearch = mockSearchBuilder; + Mockito.when(mockSearchBuilder.create()).thenReturn(mockSearchCriteria); + Mockito.doNothing().when(mockSearchCriteria).setParameters(Mockito.anyString(), Mockito.any()); + int expected = 5; + Mockito.doReturn(expected).when(hostDao).getCount(mockSearchCriteria); + Integer count = hostDao.countUpAndEnabledHostsInZone(testZoneId); + Assert.assertSame(expected, count); + Mockito.verify(mockSearchCriteria).setParameters("type", Host.Type.Routing); + Mockito.verify(mockSearchCriteria).setParameters("resourceState", ResourceState.Enabled); + Mockito.verify(mockSearchCriteria).setParameters("zoneId", testZoneId); + Mockito.verify(hostDao).getCount(mockSearchCriteria); + } + + @Test + public void testCountAllHostsAndCPUSocketsByType() { + Host.Type type = Host.Type.Routing; + GenericDaoBase.SumCount mockSumCount = new GenericDaoBase.SumCount(); + mockSumCount.count = 10; + mockSumCount.sum = 20; + HostVO host = Mockito.mock(HostVO.class); + GenericSearchBuilder sb = 
Mockito.mock(GenericSearchBuilder.class); + Mockito.when(sb.entity()).thenReturn(host); + Mockito.doReturn(sb).when(hostDao).createSearchBuilder(GenericDaoBase.SumCount.class); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doReturn(List.of(mockSumCount)).when(hostDao).customSearch(Mockito.any(SearchCriteria.class), Mockito.any()); + Pair result = hostDao.countAllHostsAndCPUSocketsByType(type); + Assert.assertEquals(10, result.first().intValue()); + Assert.assertEquals(20, result.second().intValue()); + Mockito.verify(sc).setParameters("type", type); + } + + @Test + public void testIsHostUp() { + long testHostId = 101L; + List statuses = List.of(Status.Up); + HostVO host = Mockito.mock(HostVO.class); + GenericSearchBuilder sb = Mockito.mock(GenericSearchBuilder.class); + Mockito.when(sb.entity()).thenReturn(host); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doReturn(sb).when(hostDao).createSearchBuilder(Status.class); + Mockito.doReturn(statuses).when(hostDao).customSearch(Mockito.any(SearchCriteria.class), Mockito.any()); + boolean result = hostDao.isHostUp(testHostId); + Assert.assertTrue("Host should be up", result); + Mockito.verify(sc).setParameters("id", testHostId); + Mockito.verify(hostDao).customSearch(sc, null); + } + + @Test + public void testFindHostIdsByZoneClusterResourceStateTypeAndHypervisorType() { + Long zoneId = 1L; + Long clusterId = 2L; + List resourceStates = List.of(ResourceState.Enabled); + List types = List.of(Host.Type.Routing); + List hypervisorTypes = List.of(Hypervisor.HypervisorType.KVM); + List mockResults = List.of(1001L, 1002L); // Mocked result + HostVO host = Mockito.mock(HostVO.class); + GenericSearchBuilder sb = Mockito.mock(GenericSearchBuilder.class); + Mockito.when(sb.entity()).thenReturn(host); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); 
+ Mockito.when(sb.and()).thenReturn(sb); + Mockito.doReturn(sb).when(hostDao).createSearchBuilder(Long.class); + Mockito.doReturn(mockResults).when(hostDao).customSearch(Mockito.any(SearchCriteria.class), Mockito.any()); + List hostIds = hostDao.findHostIdsByZoneClusterResourceStateTypeAndHypervisorType( + zoneId, clusterId, resourceStates, types, hypervisorTypes); + Assert.assertEquals(mockResults, hostIds); + Mockito.verify(sc).setParameters("zoneId", zoneId); + Mockito.verify(sc).setParameters("clusterId", clusterId); + Mockito.verify(sc).setParameters("resourceState", resourceStates.toArray()); + Mockito.verify(sc).setParameters("type", types.toArray()); + Mockito.verify(sc).setParameters("hypervisorTypes", hypervisorTypes.toArray()); + } + + @Test + public void testListDistinctHypervisorTypes() { + Long zoneId = 1L; + List mockResults = List.of(Hypervisor.HypervisorType.KVM, Hypervisor.HypervisorType.XenServer); + HostVO host = Mockito.mock(HostVO.class); + GenericSearchBuilder sb = Mockito.mock(GenericSearchBuilder.class); + Mockito.when(sb.entity()).thenReturn(host); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doReturn(sb).when(hostDao).createSearchBuilder(Hypervisor.HypervisorType.class); + Mockito.doReturn(mockResults).when(hostDao).customSearch(Mockito.any(SearchCriteria.class), Mockito.any()); + List hypervisorTypes = hostDao.listDistinctHypervisorTypes(zoneId); + Assert.assertEquals(mockResults, hypervisorTypes); + Mockito.verify(sc).setParameters("zoneId", zoneId); + Mockito.verify(sc).setParameters("type", Host.Type.Routing); + } + + @Test + public void testListByIds() { + List ids = List.of(101L, 102L); + List mockResults = List.of(Mockito.mock(HostVO.class), Mockito.mock(HostVO.class)); + hostDao.IdsSearch = mockSearchBuilder; + Mockito.when(mockSearchBuilder.create()).thenReturn(mockSearchCriteria); + 
Mockito.doReturn(mockResults).when(hostDao).search(Mockito.any(SearchCriteria.class), Mockito.any()); + List hosts = hostDao.listByIds(ids); + Assert.assertEquals(mockResults, hosts); + Mockito.verify(mockSearchCriteria).setParameters("id", ids.toArray()); + Mockito.verify(hostDao).search(mockSearchCriteria, null); + } + + @Test + public void testListIdsBy() { + Host.Type type = Host.Type.Routing; + Status status = Status.Up; + ResourceState resourceState = ResourceState.Enabled; + Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.KVM; + Long zoneId = 1L, podId = 2L, clusterId = 3L; + List mockResults = List.of(1001L, 1002L); + HostVO host = Mockito.mock(HostVO.class); + GenericSearchBuilder sb = Mockito.mock(GenericSearchBuilder.class); + Mockito.when(sb.entity()).thenReturn(host); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doReturn(sb).when(hostDao).createSearchBuilder(Long.class); + Mockito.doReturn(mockResults).when(hostDao).customSearch(Mockito.any(SearchCriteria.class), Mockito.any()); + List hostIds = hostDao.listIdsBy(type, status, resourceState, hypervisorType, zoneId, podId, clusterId); + Assert.assertEquals(mockResults, hostIds); + Mockito.verify(sc).setParameters("type", type); + Mockito.verify(sc).setParameters("status", status); + Mockito.verify(sc).setParameters("resourceState", resourceState); + Mockito.verify(sc).setParameters("hypervisorType", hypervisorType); + Mockito.verify(sc).setParameters("zoneId", zoneId); + Mockito.verify(sc).setParameters("podId", podId); + Mockito.verify(sc).setParameters("clusterId", clusterId); + } +} diff --git a/engine/schema/src/test/java/com/cloud/usage/dao/UsageStorageDaoImplTest.java b/engine/schema/src/test/java/com/cloud/usage/dao/UsageStorageDaoImplTest.java index 05d9154b6a4..fa47d2cd90b 100644 --- a/engine/schema/src/test/java/com/cloud/usage/dao/UsageStorageDaoImplTest.java +++ 
b/engine/schema/src/test/java/com/cloud/usage/dao/UsageStorageDaoImplTest.java @@ -23,12 +23,9 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.sql.PreparedStatement; -import com.cloud.utils.DateUtil; -import com.cloud.utils.db.TransactionLegacy; import java.util.Date; import java.util.TimeZone; -import com.cloud.usage.UsageStorageVO; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; @@ -36,6 +33,10 @@ import org.mockito.MockedStatic; import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; +import com.cloud.usage.UsageStorageVO; +import com.cloud.utils.DateUtil; +import com.cloud.utils.db.TransactionLegacy; + @RunWith(MockitoJUnitRunner.class) public class UsageStorageDaoImplTest { diff --git a/engine/schema/src/test/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBaseTest.java b/engine/schema/src/test/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBaseTest.java new file mode 100644 index 00000000000..4c54599c396 --- /dev/null +++ b/engine/schema/src/test/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBaseTest.java @@ -0,0 +1,181 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.resourcedetail; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.isNull; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +import org.apache.cloudstack.api.ResourceDetail; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class ResourceDetailsDaoBaseTest { + @Spy + @InjectMocks + TestDetailsDao testDetailsDao = new TestDetailsDao(); + + private SearchBuilder searchBuilder; + private SearchCriteria searchCriteria; + + @Before + public void setUp() { + searchBuilder = mock(SearchBuilder.class); + searchCriteria = mock(SearchCriteria.class); + TestDetailVO entityVO = mock(TestDetailVO.class); + when(searchBuilder.entity()).thenReturn(entityVO); + searchCriteria = mock(SearchCriteria.class); + doReturn(searchBuilder).when(testDetailsDao).createSearchBuilder(); + when(searchBuilder.create()).thenReturn(searchCriteria); + } + + @Test + public void testListDetailsKeyPairs() { + long resourceId = 1L; + List keys = Arrays.asList("key1", "key2"); + TestDetailVO result1 = mock(TestDetailVO.class); + when(result1.getName()).thenReturn("key1"); + 
when(result1.getValue()).thenReturn("value1"); + TestDetailVO result2 = mock(TestDetailVO.class); + when(result2.getName()).thenReturn("key2"); + when(result2.getValue()).thenReturn("value2"); + List mockResults = Arrays.asList(result1, result2); + doReturn(mockResults).when(testDetailsDao).search(any(SearchCriteria.class), isNull()); + Map result = testDetailsDao.listDetailsKeyPairs(resourceId, keys); + verify(searchBuilder).and(eq("resourceId"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchBuilder).and(eq("name"), any(), eq(SearchCriteria.Op.IN)); + verify(searchBuilder).done(); + verify(searchCriteria).setParameters("resourceId", resourceId); + verify(searchCriteria).setParameters("name", keys.toArray()); + verify(testDetailsDao).search(searchCriteria, null); + assertEquals(2, result.size()); + assertEquals("value1", result.get("key1")); + assertEquals("value2", result.get("key2")); + } + + @Test + public void testListDetailsKeyPairsEmptyResult() { + long resourceId = 1L; + List keys = Arrays.asList("key1", "key2"); + doReturn(Collections.emptyList()).when(testDetailsDao).search(any(SearchCriteria.class), isNull()); + Map result = testDetailsDao.listDetailsKeyPairs(resourceId, keys); + verify(searchBuilder).and(eq("resourceId"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchBuilder).and(eq("name"), any(), eq(SearchCriteria.Op.IN)); + verify(searchBuilder).done(); + verify(searchCriteria).setParameters("resourceId", resourceId); + verify(searchCriteria).setParameters("name", keys.toArray()); + verify(testDetailsDao).search(searchCriteria, null); + assertTrue(result.isEmpty()); + } + + protected static class TestDetailsDao extends ResourceDetailsDaoBase { + @Override + public void addDetail(long resourceId, String key, String value, boolean display) { + super.addDetail(new TestDetailVO(resourceId, key, value, display)); + } + } + + @Entity + @Table(name = "test_details") + protected static class TestDetailVO implements ResourceDetail { + @Id + 
@GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "resource_id") + private long resourceId; + + @Column(name = "name") + private String name; + + @Column(name = "value") + private String value; + + @Column(name = "display") + private boolean display = true; + + public TestDetailVO() { + } + + public TestDetailVO(long resourceId, String name, String value, boolean display) { + this.resourceId = resourceId; + this.name = name; + this.value = value; + this.display = display; + } + + @Override + public long getId() { + return id; + } + + @Override + public String getName() { + return name; + } + + @Override + public String getValue() { + return value; + } + + @Override + public long getResourceId() { + return resourceId; + } + + @Override + public boolean isDisplay() { + return display; + } + + public void setName(String name) { + this.name = name; + } + + public void setValue(String value) { + this.value = value; + } + } +} diff --git a/engine/schema/src/test/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImplTest.java b/engine/schema/src/test/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImplTest.java index bfcc38ba104..fc41a82e71d 100755 --- a/engine/schema/src/test/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImplTest.java +++ b/engine/schema/src/test/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImplTest.java @@ -17,12 +17,17 @@ package org.apache.cloudstack.storage.datastore.db; import static org.mockito.ArgumentMatchers.nullable; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.isNull; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import java.io.IOException; import java.sql.SQLException; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; 
import java.util.List; import java.util.Map; @@ -34,13 +39,15 @@ import org.junit.runner.RunWith; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; import com.cloud.storage.ScopeType; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.StoragePoolTagsDao; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.SearchBuilder; import junit.framework.TestCase; -import org.mockito.junit.MockitoJUnitRunner; @RunWith(MockitoJUnitRunner.class) public class PrimaryDataStoreDaoImplTest extends TestCase { @@ -59,6 +66,8 @@ public class PrimaryDataStoreDaoImplTest extends TestCase { @Mock StoragePoolVO storagePoolVO; + private GenericSearchBuilder genericSearchBuilder; + private static final String STORAGE_TAG_1 = "NFS-A"; private static final String STORAGE_TAG_2 = "NFS-B"; private static final String[] STORAGE_TAGS_ARRAY = {STORAGE_TAG_1, STORAGE_TAG_2}; @@ -155,4 +164,32 @@ public class PrimaryDataStoreDaoImplTest extends TestCase { String expectedSql = primaryDataStoreDao.DetailsSqlPrefix + SQL_VALUES + primaryDataStoreDao.DetailsSqlSuffix; verify(primaryDataStoreDao).searchStoragePoolsPreparedStatement(expectedSql, DATACENTER_ID, POD_ID, CLUSTER_ID, SCOPE, STORAGE_POOL_DETAILS.size()); } + + @Test + public void testListAllIds() { + GenericSearchBuilder genericSearchBuilder = mock(SearchBuilder.class); + StoragePoolVO entityVO = mock(StoragePoolVO.class); + when(genericSearchBuilder.entity()).thenReturn(entityVO); + doReturn(genericSearchBuilder).when(primaryDataStoreDao).createSearchBuilder(Long.class); + List mockIds = Arrays.asList(1L, 2L, 3L); + doReturn(mockIds).when(primaryDataStoreDao).customSearch(any(), isNull()); + List result = primaryDataStoreDao.listAllIds(); + verify(primaryDataStoreDao).customSearch(genericSearchBuilder.create(), null); + assertEquals(3, result.size()); + assertEquals(Long.valueOf(1L), result.get(0)); + 
assertEquals(Long.valueOf(2L), result.get(1)); + assertEquals(Long.valueOf(3L), result.get(2)); + } + + @Test + public void testListAllIdsEmptyResult() { + GenericSearchBuilder genericSearchBuilder = mock(SearchBuilder.class); + StoragePoolVO entityVO = mock(StoragePoolVO.class); + when(genericSearchBuilder.entity()).thenReturn(entityVO); + doReturn(genericSearchBuilder).when(primaryDataStoreDao).createSearchBuilder(Long.class); + doReturn(Collections.emptyList()).when(primaryDataStoreDao).customSearch(any(), isNull()); + List result = primaryDataStoreDao.listAllIds(); + verify(primaryDataStoreDao).customSearch(genericSearchBuilder.create(), null); + assertTrue(result.isEmpty()); + } } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java index 63524ccb6db..2c034d8429a 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java @@ -124,18 +124,24 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement protected List reorderPoolsByCapacity(DeploymentPlan plan, List pools) { Long zoneId = plan.getDataCenterId(); Long clusterId = plan.getClusterId(); - short capacityType; if (CollectionUtils.isEmpty(pools)) { return null; } - if (pools.get(0).getPoolType().isShared()) { + short capacityType = Capacity.CAPACITY_TYPE_LOCAL_STORAGE; + String storageType = "local"; + StoragePool storagePool = pools.get(0); + if (storagePool.isShared()) { capacityType = Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED; - } else { - capacityType = Capacity.CAPACITY_TYPE_LOCAL_STORAGE; + storageType = "shared"; } + logger.debug(String.format( + "Filtering storage pools by capacity type [%s] as the first storage pool of the list, with name [%s] 
and ID [%s], is a [%s] storage.", + capacityType, storagePool.getName(), storagePool.getUuid(), storageType + )); + List poolIdsByCapacity = capacityDao.orderHostsByFreeCapacity(zoneId, clusterId, capacityType); logger.debug(String.format("List of pools in descending order of available capacity [%s].", poolIdsByCapacity)); @@ -221,6 +227,8 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement } List reorderStoragePoolsBasedOnAlgorithm(List pools, DeploymentPlan plan, Account account) { + logger.debug(String.format("Using allocation algorithm [%s] to reorder pools.", allocationAlgorithm)); + if (allocationAlgorithm.equals("random") || allocationAlgorithm.equals("userconcentratedpod_random") || (account == null)) { reorderRandomPools(pools); } else if (StringUtils.equalsAny(allocationAlgorithm, "userdispersing", "firstfitleastconsumed")) { diff --git a/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLB.java b/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLB.java index 9dfb9e1654e..b136b8e842b 100644 --- a/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLB.java +++ b/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLB.java @@ -20,6 +20,12 @@ import java.util.List; public interface IndirectAgentLB { + /** + * Return list of management server addresses from host setting + * @return management servers string list + */ + List getManagementServerList(); + /** * Return list of management server addresses after applying configured lb algorithm * for a host in a zone. @@ -30,6 +36,17 @@ public interface IndirectAgentLB { */ List getManagementServerList(Long hostId, Long dcId, List orderedHostIdList); + /** + * Return list of management server addresses after applying the lb algorithm + * for a host in a zone. 
+ * @param hostId host id (if present) + * @param dcId zone id + * @param orderedHostIdList (optional) list of ordered host id list + * @param lbAlgorithm lb algorithm + * @return management servers string list + */ + List getManagementServerList(Long hostId, Long dcId, List orderedHostIdList, String lbAlgorithm); + /** * Compares received management server list against expected list for a host in a zone. * @param hostId host id @@ -45,6 +62,8 @@ public interface IndirectAgentLB { */ String getLBAlgorithmName(); + void checkLBAlgorithmName(String lbAlgorithm); + /** * Returns the configured LB preferred host check interval (if applicable at cluster scope) * @return returns interval in seconds @@ -53,4 +72,7 @@ public interface IndirectAgentLB { void propagateMSListToAgents(); + boolean haveAgentBasedHosts(long msId); + + boolean migrateAgents(String fromMsUuid, long fromMsId, String lbAlgorithm, long timeoutDurationInMs); } diff --git a/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBAlgorithm.java b/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBAlgorithm.java index c87a0996fcc..062d2226876 100644 --- a/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBAlgorithm.java +++ b/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBAlgorithm.java @@ -42,4 +42,8 @@ public interface IndirectAgentLBAlgorithm { * @return true if the lists are equal, false if not */ boolean compare(final List msList, final List receivedMsList); + + default boolean isHostListNeeded() { + return false; + } } diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java b/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java index e26e32e7b2e..1b45910b88a 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java @@ -941,7 +941,7 
@@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C try { JmxUtil.unregisterMBean("ClusterManager", "Node " + mshost.getId()); } catch (final Exception e) { - logger.warn("Unable to deregiester cluster node from JMX monitoring due to exception " + e.toString()); + logger.warn("Unable to deregister cluster node from JMX monitoring due to exception " + e.toString()); } } @@ -1063,8 +1063,12 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C logger.info("New instance of management server {}, runId {} is being started", mshost, _runId); } } else { + ManagementServerHost.State msHostState = ManagementServerHost.State.Up; + if (ManagementServerHost.State.Maintenance.equals(mshost.getState()) || ManagementServerHost.State.PreparingForMaintenance.equals(mshost.getState())) { + msHostState = ManagementServerHost.State.Maintenance; + } _mshostDao.update(mshost.getId(), _runId, NetUtils.getCanonicalHostName(), version, _clusterNodeIP, _currentServiceAdapter.getServicePort(), - DateUtil.currentGMTTime()); + DateUtil.currentGMTTime(), msHostState); if (logger.isInfoEnabled()) { logger.info("Management server {}, runId {} is being started", mshost, _runId); } @@ -1102,11 +1106,17 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C if (_mshostId != null) { final ManagementServerHostVO mshost = _mshostDao.findByMsid(_msId); - final ManagementServerStatusVO mshostStatus = mshostStatusDao.findByMsId(mshost.getUuid()); - mshost.setState(ManagementServerHost.State.Down); - mshostStatus.setLastJvmStop(new Date()); - _mshostDao.update(_mshostId, mshost); - mshostStatusDao.update(mshostStatus.getId(), mshostStatus); + if (mshost != null) { + final ManagementServerStatusVO mshostStatus = mshostStatusDao.findByMsId(mshost.getUuid()); + mshostStatus.setLastJvmStop(new Date()); + mshostStatusDao.update(mshostStatus.getId(), mshostStatus); + + ManagementServerHost.State msHostState = 
ManagementServerHost.State.Down; + if (ManagementServerHost.State.Maintenance.equals(mshost.getState()) || ManagementServerHost.State.PreparingForMaintenance.equals(mshost.getState())) { + msHostState = ManagementServerHost.State.Maintenance; + } + _mshostDao.updateState(mshost.getId(), msHostState); + } } _heartbeatScheduler.shutdownNow(); diff --git a/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostDao.java b/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostDao.java index 96d57ee0425..6c8ffcac78b 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostDao.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostDao.java @@ -33,10 +33,12 @@ public interface ManagementServerHostDao extends GenericDao getActiveList(Date cutTime); List getInactiveList(Date cutTime); @@ -47,6 +49,8 @@ public interface ManagementServerHostDao extends GenericDao listBy(ManagementServerHost.State... states); + List listNonUpStateMsIPs(); + /** * Lists msids for which hosts are orphaned, i.e. 
msids that hosts refer as their owning ms whilst no mshost entry exists with those msids * diff --git a/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostDaoImpl.java b/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostDaoImpl.java index 27b6d52f61b..ec943a9c26b 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostDaoImpl.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostDaoImpl.java @@ -35,6 +35,7 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.TransactionLegacy; @@ -46,6 +47,7 @@ public class ManagementServerHostDaoImpl extends GenericDaoBase ActiveSearch; private final SearchBuilder InactiveSearch; private final SearchBuilder StateSearch; + protected GenericSearchBuilder NonUpStateMsSearch; @Override public void invalidateRunSession(long id, long runid) { @@ -77,7 +79,7 @@ public class ManagementServerHostDaoImpl extends GenericDaoBase getActiveList(Date cutTime) { SearchCriteria sc = ActiveSearch.create(); @@ -205,6 +229,11 @@ public class ManagementServerHostDaoImpl extends GenericDaoBase listNonUpStateMsIPs() { + SearchCriteria sc = NonUpStateMsSearch.create(); + sc.addAnd("state", SearchCriteria.Op.NLIKE, State.Up); + return customSearch(sc, null); + } + @Override public List listOrphanMsids() { List orphanList = new ArrayList(); diff --git a/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostPeerDao.java b/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostPeerDao.java index 55559946cf0..37601e8ce78 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostPeerDao.java +++ 
b/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostPeerDao.java @@ -33,4 +33,7 @@ public interface ManagementServerHostPeerDao extends GenericDao FindForUpdateSearch; private final SearchBuilder CountSearch; private final SearchBuilder ActiveSearch; + private final SearchBuilder FindByOwnerAndPeerMsSearch; + private final SearchBuilder FindByPeerMsAndStateSearch; + public ManagementServerHostPeerDaoImpl() { ClearPeerSearch = createSearchBuilder(); @@ -59,6 +62,17 @@ public class ManagementServerHostPeerDaoImpl extends GenericDaoBase l = listBy(sc); - return l.size(); + return getCount(sc); } @Override @@ -133,4 +146,23 @@ public class ManagementServerHostPeerDaoImpl extends GenericDaoBase 0; } + + @Override + public ManagementServerHostPeerVO findByOwnerAndPeerMsHost(long ownerMshost, long peerMshost, ManagementServerHost.State peerState) { + SearchCriteria sc = FindByOwnerAndPeerMsSearch.create(); + sc.setParameters("ownerMshost", ownerMshost); + sc.setParameters("peerMshost", peerMshost); + sc.setParameters("peerState", peerState); + + return findOneBy(sc); + } + + @Override + public ManagementServerHostPeerVO findByPeerMsAndState(long peerMshost, ManagementServerHost.State peerState) { + SearchCriteria sc = FindByPeerMsAndStateSearch.create(); + sc.setParameters("peerMshost", peerMshost); + sc.setParameters("peerState", peerState); + + return findOneBy(sc); + } } diff --git a/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java b/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java index b47370d9205..911a4ad3707 100644 --- a/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java +++ b/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java @@ -23,7 +23,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Set; -import 
java.util.concurrent.TimeUnit; import javax.annotation.PostConstruct; import javax.inject.Inject; @@ -36,6 +35,7 @@ import org.apache.cloudstack.framework.config.ScopedConfigStorage; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.config.dao.ConfigurationGroupDao; import org.apache.cloudstack.framework.config.dao.ConfigurationSubGroupDao; +import org.apache.cloudstack.utils.cache.LazyCache; import org.apache.commons.lang.ObjectUtils; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; @@ -44,8 +44,6 @@ import org.apache.logging.log4j.Logger; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; import com.cloud.utils.exception.CloudRuntimeException; -import com.github.benmanes.caffeine.cache.Cache; -import com.github.benmanes.caffeine.cache.Caffeine; /** * ConfigDepotImpl implements the ConfigDepot and ConfigDepotAdmin interface. @@ -87,17 +85,15 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin { List _scopedStorages; Set _configured = Collections.synchronizedSet(new HashSet()); Set newConfigs = Collections.synchronizedSet(new HashSet<>()); - Cache configCache; + LazyCache configCache; private HashMap>> _allKeys = new HashMap>>(1007); HashMap>> _scopeLevelConfigsMap = new HashMap>>(); public ConfigDepotImpl() { - configCache = Caffeine.newBuilder() - .maximumSize(512) - .expireAfterWrite(CONFIG_CACHE_EXPIRE_SECONDS, TimeUnit.SECONDS) - .build(); + configCache = new LazyCache<>(512, + CONFIG_CACHE_EXPIRE_SECONDS, this::getConfigStringValueInternal); ConfigKey.init(this); createEmptyScopeLevelMappings(); } @@ -311,7 +307,7 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin { @Override public String getConfigStringValue(String key, ConfigKey.Scope scope, Long scopeId) { - return configCache.get(getConfigCacheKey(key, scope, scopeId), this::getConfigStringValueInternal); + return configCache.get(getConfigCacheKey(key, 
scope, scopeId)); } @Override diff --git a/framework/db/src/main/java/com/cloud/utils/db/GenericDao.java b/framework/db/src/main/java/com/cloud/utils/db/GenericDao.java index de8838b0999..44c312ea9d8 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/GenericDao.java +++ b/framework/db/src/main/java/com/cloud/utils/db/GenericDao.java @@ -148,6 +148,11 @@ public interface GenericDao { */ List listAll(Filter filter); + /** + * Look IDs for all active rows. + */ + List listAllIds(); + /** * Search for the entity beans * @param sc diff --git a/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java b/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java index c7f2daadc51..bf6fb03563f 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java +++ b/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java @@ -1218,6 +1218,35 @@ public abstract class GenericDaoBase extends Compone return executeList(sql.toString()); } + private Object getIdObject() { + T entity = (T)_searchEnhancer.create(); + try { + Method m = _entityBeanType.getMethod("getId"); + return m.invoke(entity); + } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException ignored) { + logger.warn("Unable to get ID object for entity: {}", _entityBeanType.getSimpleName()); + } + return null; + } + + @Override + public List listAllIds() { + Object idObj = getIdObject(); + if (idObj == null) { + return Collections.emptyList(); + } + Class clazz = (Class)idObj.getClass(); + GenericSearchBuilder sb = createSearchBuilder(clazz); + try { + Method m = sb.entity().getClass().getMethod("getId"); + sb.selectFields(m.invoke(sb.entity())); + } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException ignored) { + return Collections.emptyList(); + } + sb.done(); + return customSearch(sb.create(), null); + } + @Override public boolean expunge(final ID id) { final TransactionLegacy txn = TransactionLegacy.currentTxn(); @@ 
-2445,4 +2474,11 @@ public abstract class GenericDaoBase extends Compone } } + public static class SumCount { + public long sum; + public long count; + + public SumCount() { + } + } } diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDao.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDao.java index b3bfda0334c..79ec3f2b087 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDao.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDao.java @@ -40,4 +40,5 @@ public interface VmWorkJobDao extends GenericDao { void expungeLeftoverWorkJobs(long msid); int expungeByVmList(List vmIds, Long batchSize); + List listVmIdsWithPendingJob(); } diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java index 3b167498a37..a467b5fdf59 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java @@ -24,6 +24,7 @@ import java.util.List; import javax.annotation.PostConstruct; import javax.inject.Inject; +import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO; import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO.Step; import org.apache.cloudstack.jobs.JobInfo; @@ -32,6 +33,8 @@ import org.apache.commons.collections.CollectionUtils; import com.cloud.utils.DateUtil; import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; @@ -224,4 +227,17 @@ public 
class VmWorkJobDaoImpl extends GenericDaoBase implemen sc.setParameters("vmIds", vmIds.toArray()); return batchExpunge(sc, batchSize); } + + @Override + public List listVmIdsWithPendingJob() { + GenericSearchBuilder sb = createSearchBuilder(Long.class); + SearchBuilder asyncJobSearch = _baseJobDao.createSearchBuilder(); + asyncJobSearch.and("status", asyncJobSearch.entity().getStatus(), SearchCriteria.Op.EQ); + sb.join("asyncJobSearch", asyncJobSearch, sb.entity().getId(), asyncJobSearch.entity().getId(), JoinBuilder.JoinType.INNER); + sb.and("removed", sb.entity().getRemoved(), Op.NULL); + sb.selectFields(sb.entity().getVmInstanceId()); + SearchCriteria sc = sb.create(); + sc.setJoinParameters("asyncJobSearch", "status", JobInfo.Status.IN_PROGRESS); + return customSearch(sc, null); + } } diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobManagerImpl.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobManagerImpl.java index 47bf27bd6c4..448a4eb219c 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobManagerImpl.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobManagerImpl.java @@ -174,7 +174,8 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, private ExecutorService _apiJobExecutor; private ExecutorService _workerJobExecutor; - private boolean asyncJobsEnabled = true; + private boolean asyncJobsDisabled = false; + private long asyncJobsDisabledTime = 0; @Override public String getConfigComponentName() { @@ -218,16 +219,48 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, return submitAsyncJob(job, false); } - private void checkShutdown() { - if (!isAsyncJobsEnabled()) { - throw new CloudRuntimeException("A shutdown has been triggered. 
Can not accept new jobs"); + private void checkAsyncJobAllowed(AsyncJob job) { + if (isAsyncJobsEnabled()) { + return; } + + if (job instanceof VmWorkJobVO) { + String related = job.getRelated(); + if (StringUtils.isNotBlank(related)) { + AsyncJob relatedJob = _jobDao.findByIdIncludingRemoved(Long.parseLong(related)); + if (relatedJob != null) { + long relatedJobCreatedTime = relatedJob.getCreated().getTime(); + if ((asyncJobsDisabledTime - relatedJobCreatedTime) >= 0) { + return; + } + } + } + } + + throw new CloudRuntimeException("Maintenance or Shutdown has been initiated on this management server. Can not accept new jobs"); + } + + private boolean checkSyncQueueItemAllowed(SyncQueueItemVO item) { + if (isAsyncJobsEnabled()) { + return true; + } + + Long contentId = item.getContentId(); + AsyncJob relatedJob = _jobDao.findByIdIncludingRemoved(contentId); + if (relatedJob != null) { + long relatedJobCreatedTime = relatedJob.getCreated().getTime(); + if ((asyncJobsDisabledTime - relatedJobCreatedTime) >= 0) { + return true; + } + } + + return false; } @SuppressWarnings("unchecked") @DB public long submitAsyncJob(AsyncJob job, boolean scheduleJobExecutionInContext) { - checkShutdown(); + checkAsyncJobAllowed(job); @SuppressWarnings("rawtypes") GenericDao dao = GenericDaoBase.getDao(job.getClass()); @@ -248,7 +281,7 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, @Override @DB public long submitAsyncJob(final AsyncJob job, final String syncObjType, final long syncObjId) { - checkShutdown(); + checkAsyncJobAllowed(job); try { @SuppressWarnings("rawtypes") @@ -860,7 +893,7 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, protected void reallyRun() { try { if (!isAsyncJobsEnabled()) { - logger.info("A shutdown has been triggered. Not executing any async job"); + logger.info("Maintenance or Shutdown has been initiated on this management server. 
Not executing any async jobs"); return; } @@ -1301,16 +1334,18 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, @Override public void enableAsyncJobs() { - this.asyncJobsEnabled = true; + this.asyncJobsDisabled = false; + this.asyncJobsDisabledTime = 0; } @Override public void disableAsyncJobs() { - this.asyncJobsEnabled = false; + this.asyncJobsDisabled = true; + this.asyncJobsDisabledTime = System.currentTimeMillis(); } @Override public boolean isAsyncJobsEnabled() { - return asyncJobsEnabled; + return !asyncJobsDisabled; } } diff --git a/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImplTest.java b/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImplTest.java index 3e2bc15b1e0..a70a96b1a14 100644 --- a/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImplTest.java +++ b/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImplTest.java @@ -16,27 +16,69 @@ // under the License. 
package org.apache.cloudstack.framework.jobs.dao; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyLong; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.isNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.List; +import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO; +import org.apache.cloudstack.jobs.JobInfo; import org.junit.Assert; +import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.Mockito; +import org.mockito.InjectMocks; +import org.mockito.Mock; import org.mockito.Spy; import org.mockito.junit.MockitoJUnitRunner; import org.mockito.stubbing.Answer; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @RunWith(MockitoJUnitRunner.class) public class VmWorkJobDaoImplTest { + @Mock + AsyncJobDao asyncJobDao; @Spy + @InjectMocks VmWorkJobDaoImpl vmWorkJobDaoImpl; + private GenericSearchBuilder genericVmWorkJobSearchBuilder; + private SearchBuilder asyncJobSearchBuilder; + private SearchCriteria searchCriteria; + + @Before + public void setUp() { + genericVmWorkJobSearchBuilder = mock(GenericSearchBuilder.class); + VmWorkJobVO entityVO = mock(VmWorkJobVO.class); + when(genericVmWorkJobSearchBuilder.entity()).thenReturn(entityVO); + asyncJobSearchBuilder = mock(SearchBuilder.class); + AsyncJobVO asyncJobVO = mock(AsyncJobVO.class); + 
when(asyncJobSearchBuilder.entity()).thenReturn(asyncJobVO); + searchCriteria = mock(SearchCriteria.class); + when(vmWorkJobDaoImpl.createSearchBuilder(Long.class)).thenReturn(genericVmWorkJobSearchBuilder); + when(asyncJobDao.createSearchBuilder()).thenReturn(asyncJobSearchBuilder); + when(genericVmWorkJobSearchBuilder.create()).thenReturn(searchCriteria); + } + @Test public void testExpungeByVmListNoVms() { Assert.assertEquals(0, vmWorkJobDaoImpl.expungeByVmList( @@ -47,22 +89,52 @@ public class VmWorkJobDaoImplTest { @Test public void testExpungeByVmList() { - SearchBuilder sb = Mockito.mock(SearchBuilder.class); - SearchCriteria sc = Mockito.mock(SearchCriteria.class); - Mockito.when(sb.create()).thenReturn(sc); - Mockito.doAnswer((Answer) invocationOnMock -> { + SearchBuilder sb = mock(SearchBuilder.class); + SearchCriteria sc = mock(SearchCriteria.class); + when(sb.create()).thenReturn(sc); + doAnswer((Answer) invocationOnMock -> { Long batchSize = (Long)invocationOnMock.getArguments()[1]; return batchSize == null ? 
0 : batchSize.intValue(); - }).when(vmWorkJobDaoImpl).batchExpunge(Mockito.any(SearchCriteria.class), Mockito.anyLong()); - Mockito.when(vmWorkJobDaoImpl.createSearchBuilder()).thenReturn(sb); - final VmWorkJobVO mockedVO = Mockito.mock(VmWorkJobVO.class); - Mockito.when(sb.entity()).thenReturn(mockedVO); + }).when(vmWorkJobDaoImpl).batchExpunge(any(SearchCriteria.class), anyLong()); + when(vmWorkJobDaoImpl.createSearchBuilder()).thenReturn(sb); + final VmWorkJobVO mockedVO = mock(VmWorkJobVO.class); + when(sb.entity()).thenReturn(mockedVO); List vmIds = List.of(1L, 2L); Object[] array = vmIds.toArray(); Long batchSize = 50L; Assert.assertEquals(batchSize.intValue(), vmWorkJobDaoImpl.expungeByVmList(List.of(1L, 2L), batchSize)); - Mockito.verify(sc).setParameters("vmIds", array); - Mockito.verify(vmWorkJobDaoImpl, Mockito.times(1)) + verify(sc).setParameters("vmIds", array); + verify(vmWorkJobDaoImpl, times(1)) .batchExpunge(sc, batchSize); } + + @Test + public void testListVmIdsWithPendingJob() { + List mockVmIds = Arrays.asList(101L, 102L, 103L); + doReturn(mockVmIds).when(vmWorkJobDaoImpl).customSearch(any(SearchCriteria.class), isNull()); + List result = vmWorkJobDaoImpl.listVmIdsWithPendingJob(); + verify(genericVmWorkJobSearchBuilder).join(eq("asyncJobSearch"), eq(asyncJobSearchBuilder), any(), any(), eq(JoinBuilder.JoinType.INNER)); + verify(genericVmWorkJobSearchBuilder).and(eq("removed"), any(), eq(SearchCriteria.Op.NULL)); + verify(genericVmWorkJobSearchBuilder).create(); + verify(asyncJobSearchBuilder).and(eq("status"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchCriteria).setJoinParameters(eq("asyncJobSearch"), eq("status"), eq(JobInfo.Status.IN_PROGRESS)); + verify(vmWorkJobDaoImpl).customSearch(searchCriteria, null); + assertEquals(3, result.size()); + assertEquals(Long.valueOf(101L), result.get(0)); + assertEquals(Long.valueOf(102L), result.get(1)); + assertEquals(Long.valueOf(103L), result.get(2)); + } + + @Test + public void 
testListVmIdsWithPendingJobEmptyResult() { + doReturn(Collections.emptyList()).when(vmWorkJobDaoImpl).customSearch(any(SearchCriteria.class), isNull()); + List result = vmWorkJobDaoImpl.listVmIdsWithPendingJob(); + verify(genericVmWorkJobSearchBuilder).join(eq("asyncJobSearch"), eq(asyncJobSearchBuilder), any(), any(), eq(JoinBuilder.JoinType.INNER)); + verify(genericVmWorkJobSearchBuilder).and(eq("removed"), any(), eq(SearchCriteria.Op.NULL)); + verify(genericVmWorkJobSearchBuilder).create(); + verify(asyncJobSearchBuilder).and(eq("status"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchCriteria).setJoinParameters(eq("asyncJobSearch"), eq("status"), eq(JobInfo.Status.IN_PROGRESS)); + verify(vmWorkJobDaoImpl).customSearch(searchCriteria, null); + assertTrue(result.isEmpty()); + } } diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Account.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Account.java index 37c90ab0bcd..2420d577f10 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Account.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Account.java @@ -17,11 +17,19 @@ package org.apache.cloudstack.quota.activationrule.presetvariables; +import com.cloud.utils.DateUtil; + +import java.util.Date; +import java.util.TimeZone; + public class Account extends GenericPresetVariable { @PresetVariableDefinition(description = "Role of the account. This field will not exist if the account is a project.") private Role role; + @PresetVariableDefinition(description = "The date the account was created in GMT. 
This field will not exist for the first root admin account.") + private String created; + public Role getRole() { return role; } @@ -31,4 +39,12 @@ public class Account extends GenericPresetVariable { fieldNamesToIncludeInToString.add("role"); } + public String getCreated() { + return created; + } + + public void setCreated(Date created) { + this.created = DateUtil.displayDateInTimezone(TimeZone.getTimeZone("GMT"), created); + fieldNamesToIncludeInToString.add("created"); + } } diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableHelper.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableHelper.java index d5df3ae8a91..1e84ba27e02 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableHelper.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableHelper.java @@ -224,6 +224,7 @@ public class PresetVariableHelper { Account account = new Account(); account.setId(accountVo.getUuid()); account.setName(accountVo.getName()); + account.setCreated(accountVo.getCreated()); setPresetVariableRoleInAccountIfAccountIsNotAProject(accountVo.getType(), accountVo.getRoleId(), account); diff --git a/framework/quota/src/test/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableHelperTest.java b/framework/quota/src/test/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableHelperTest.java index 45af4b8a29a..7f64939f7bb 100644 --- a/framework/quota/src/test/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableHelperTest.java +++ b/framework/quota/src/test/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableHelperTest.java @@ -375,11 +375,12 @@ public class PresetVariableHelperTest { Account account = getAccountForTests(); 
Mockito.doReturn(account.getId()).when(accountVoMock).getUuid(); Mockito.doReturn(account.getName()).when(accountVoMock).getName(); + Mockito.doReturn(account.getCreated()).when(accountVoMock).getCreated(); Account result = presetVariableHelperSpy.getPresetVariableAccount(1l); assertPresetVariableIdAndName(account, result); - validateFieldNamesToIncludeInToString(Arrays.asList("id", "name"), result); + validateFieldNamesToIncludeInToString(Arrays.asList("created", "id", "name"), result); } @Test diff --git a/packaging/el8/cloud.spec b/packaging/el8/cloud.spec index e34778820cb..244f4431a3b 100644 --- a/packaging/el8/cloud.spec +++ b/packaging/el8/cloud.spec @@ -17,6 +17,8 @@ %define __os_install_post %{nil} %global debug_package %{nil} +%global __requires_exclude libc\\.so\\..* +%define _binaries_in_noarch_packages_terminate_build 0 # DISABLE the post-percentinstall java repacking and line number stripping # we need to find a way to just disable the java repacking and line number stripping, but not the autodeps @@ -35,6 +37,7 @@ Group: System Environment/Libraries # FIXME do groups for every single one of the subpackages Source0: %{name}-%{_maventag}.tgz BuildRoot: %{_tmppath}/%{name}-%{_maventag}-%{release}-build +BuildArch: noarch BuildRequires: (java-11-openjdk-devel or java-17-openjdk-devel) #BuildRequires: ws-commons-util @@ -68,7 +71,7 @@ Requires: (openssh-clients or openssh) Requires: (nfs-utils or nfs-client) Requires: iproute Requires: wget -Requires: mysql +Requires: (mysql or mariadb) Requires: sudo Requires: /sbin/service Requires: /sbin/chkconfig @@ -117,7 +120,7 @@ Requires: qemu-kvm Requires: cryptsetup Requires: rng-tools Requires: (libgcrypt > 1.8.3 or libgcrypt20) -Requires: (selinux-tools if qemu-tools) +Requires: (selinux-tools if selinux-tools) Requires: sysstat Provides: cloud-agent Group: System Environment/Libraries diff --git a/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java 
b/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java index db40b6e68dd..030e0bcf014 100644 --- a/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java +++ b/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java @@ -26,20 +26,21 @@ import java.util.Set; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.acl.RolePermissionEntity.Permission; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.utils.cache.LazyCache; +import org.apache.commons.lang3.StringUtils; import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.UnavailableCommandException; import com.cloud.user.Account; import com.cloud.user.AccountService; import com.cloud.user.User; +import com.cloud.utils.Pair; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.component.PluggableService; -import org.apache.commons.lang3.StringUtils; public class DynamicRoleBasedAPIAccessChecker extends AdapterBase implements APIAclChecker { - @Inject private AccountService accountService; @Inject @@ -48,6 +49,9 @@ public class DynamicRoleBasedAPIAccessChecker extends AdapterBase implements API private List services; private Map> annotationRoleBasedApisMap = new HashMap>(); + private LazyCache accountCache; + private LazyCache>> rolePermissionsCache; + private int cachePeriod; protected DynamicRoleBasedAPIAccessChecker() { super(); @@ -99,23 +103,66 @@ public class DynamicRoleBasedAPIAccessChecker extends AdapterBase implements API annotationRoleBasedApisMap.get(role.getRoleType()).contains(apiName); } + protected Account getAccountFromId(long accountId) { + return accountService.getAccount(accountId); + } + + protected Pair> getRolePermissions(long roleId) { + final Role accountRole = 
roleService.findRole(roleId); + if (accountRole == null || accountRole.getId() < 1L) { + return new Pair<>(null, null); + } + + if (accountRole.getRoleType() == RoleType.Admin && accountRole.getId() == RoleType.Admin.getId()) { + return new Pair<>(accountRole, null); + } + + return new Pair<>(accountRole, roleService.findAllPermissionsBy(accountRole.getId())); + } + + protected Pair> getRolePermissionsUsingCache(long roleId) { + if (cachePeriod > 0) { + return rolePermissionsCache.get(roleId); + } + return getRolePermissions(roleId); + } + + protected Account getAccountFromIdUsingCache(long accountId) { + if (cachePeriod > 0) { + return accountCache.get(accountId); + } + return getAccountFromId(accountId); + } + @Override public boolean checkAccess(User user, String commandName) throws PermissionDeniedException { if (!isEnabled()) { return true; } - - Account account = accountService.getAccount(user.getAccountId()); + Account account = getAccountFromIdUsingCache(user.getAccountId()); if (account == null) { - throw new PermissionDeniedException(String.format("The account id [%s] for user id [%s] is null.", user.getAccountId(), user.getUuid())); + throw new PermissionDeniedException(String.format("Account for user id [%s] cannot be found", user.getUuid())); } - - return checkAccess(account, commandName); + Pair> roleAndPermissions = getRolePermissionsUsingCache(account.getRoleId()); + final Role accountRole = roleAndPermissions.first(); + if (accountRole == null) { + throw new PermissionDeniedException(String.format("Account role for user id [%s] cannot be found.", user.getUuid())); + } + if (accountRole.getRoleType() == RoleType.Admin && accountRole.getId() == RoleType.Admin.getId()) { + logger.info("Account for user id {} is Root Admin or Domain Admin, all APIs are allowed.", user.getUuid()); + return true; + } + List allPermissions = roleAndPermissions.second(); + if (checkApiPermissionByRole(accountRole, commandName, allPermissions)) { + return true; + } + throw 
new UnavailableCommandException(String.format("The API [%s] does not exist or is not available for the account for user id [%s].", commandName, user.getUuid())); } public boolean checkAccess(Account account, String commandName) { - final Role accountRole = roleService.findRole(account.getRoleId()); - if (accountRole == null || accountRole.getId() < 1L) { + Pair> roleAndPermissions = getRolePermissionsUsingCache(account.getRoleId()); + final Role accountRole = roleAndPermissions.first(); + if (accountRole == null) { throw new PermissionDeniedException(String.format("The account [%s] has role null or unknown.", account)); } @@ -160,6 +207,9 @@ public class DynamicRoleBasedAPIAccessChecker extends AdapterBase implements API @Override public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); + cachePeriod = Math.max(0, RoleService.DynamicApiCheckerCachePeriod.value()); + accountCache = new LazyCache<>(32, cachePeriod, this::getAccountFromId); + rolePermissionsCache = new LazyCache<>(32, cachePeriod, this::getRolePermissions); return true; } diff --git a/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java b/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java index 0ed658aa70d..667b475eada 100644 --- a/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java +++ b/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java @@ -321,13 +321,13 @@ public class ExplicitDedicationProcessor extends AffinityProcessorBase implement } } //add all hosts inside this in includeList - List hostList = _hostDao.listByDataCenterId(dr.getDataCenterId()); - for (HostVO host : hostList) { - DedicatedResourceVO dHost = 
_dedicatedDao.findByHostId(host.getId()); + List hostList = _hostDao.listEnabledIdsByDataCenterId(dr.getDataCenterId()); + for (Long hostId : hostList) { + DedicatedResourceVO dHost = _dedicatedDao.findByHostId(hostId); if (dHost != null && !dedicatedResources.contains(dHost)) { - avoidList.addHost(host.getId()); + avoidList.addHost(hostId); } else { - includeList.addHost(host.getId()); + includeList.addHost(hostId); } } } @@ -337,7 +337,7 @@ public class ExplicitDedicationProcessor extends AffinityProcessorBase implement List pods = _podDao.listByDataCenterId(dc.getId()); List clusters = _clusterDao.listClustersByDcId(dc.getId()); - List hosts = _hostDao.listByDataCenterId(dc.getId()); + List hostIds = _hostDao.listEnabledIdsByDataCenterId(dc.getId()); Set podsInIncludeList = includeList.getPodsToAvoid(); Set clustersInIncludeList = includeList.getClustersToAvoid(); Set hostsInIncludeList = includeList.getHostsToAvoid(); @@ -357,9 +357,9 @@ public class ExplicitDedicationProcessor extends AffinityProcessorBase implement } } - for (HostVO host : hosts) { - if (hostsInIncludeList != null && !hostsInIncludeList.contains(host.getId())) { - avoidList.addHost(host.getId()); + for (Long hostId : hostIds) { + if (hostsInIncludeList != null && !hostsInIncludeList.contains(hostId)) { + avoidList.addHost(hostId); } } return avoidList; diff --git a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java index cd7dc2bbbad..1f020726793 100644 --- a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java +++ b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java @@ -23,7 +23,6 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; -import 
org.apache.commons.lang3.StringUtils; import org.apache.cloudstack.affinity.AffinityGroup; import org.apache.cloudstack.affinity.AffinityGroupService; import org.apache.cloudstack.affinity.dao.AffinityGroupDao; @@ -45,8 +44,9 @@ import org.apache.cloudstack.api.response.DedicatePodResponse; import org.apache.cloudstack.api.response.DedicateZoneResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.logging.log4j.Logger; +import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.configuration.Config; @@ -126,7 +126,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { @ActionEvent(eventType = EventTypes.EVENT_DEDICATE_RESOURCE, eventDescription = "dedicating a Zone") public List dedicateZone(final Long zoneId, final Long domainId, final String accountName) { Long accountId = null; - List hosts = null; + List hostIds = null; if (accountName != null) { Account caller = CallContext.current().getCallingAccount(); Account owner = _accountMgr.finalizeOwner(caller, accountName, domainId, null); @@ -203,18 +203,20 @@ public class DedicatedResourceManagerImpl implements DedicatedService { releaseDedicatedResource(null, null, dr.getClusterId(), null); } - hosts = _hostDao.listByDataCenterId(dc.getId()); - for (HostVO host : hosts) { - DedicatedResourceVO dHost = _dedicatedDao.findByHostId(host.getId()); + hostIds = _hostDao.listEnabledIdsByDataCenterId(dc.getId()); + for (Long hostId : hostIds) { + DedicatedResourceVO dHost = _dedicatedDao.findByHostId(hostId); if (dHost != null) { if (!(childDomainIds.contains(dHost.getDomainId()))) { + HostVO host = _hostDao.findById(hostId); throw new CloudRuntimeException("Host " + host.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); } if 
(accountId != null) { if (dHost.getAccountId().equals(accountId)) { hostsToRelease.add(dHost); } else { - logger.error(String.format("Host %s under this Zone %s is dedicated to different account/domain", host, dc)); + HostVO host = _hostDao.findById(hostId); + logger.error("{} under {} is dedicated to different account/domain", host, dc); throw new CloudRuntimeException("Host " + host.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); } } else { @@ -230,7 +232,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { } } - checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hosts); + checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hostIds); final Long accountIdFinal = accountId; return Transaction.execute(new TransactionCallback>() { @@ -284,7 +286,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { childDomainIds.add(domainId); checkAccountAndDomain(accountId, domainId); HostPodVO pod = _podDao.findById(podId); - List hosts = null; + List hostIds = null; if (pod == null) { throw new InvalidParameterValueException("Unable to find pod by id " + podId); } else { @@ -339,18 +341,20 @@ public class DedicatedResourceManagerImpl implements DedicatedService { releaseDedicatedResource(null, null, dr.getClusterId(), null); } - hosts = _hostDao.findByPodId(pod.getId()); - for (HostVO host : hosts) { - DedicatedResourceVO dHost = _dedicatedDao.findByHostId(host.getId()); + hostIds = _hostDao.listIdsByPodId(pod.getId()); + for (Long hostId : hostIds) { + DedicatedResourceVO dHost = _dedicatedDao.findByHostId(hostId); if (dHost != null) { if (!(getDomainChildIds(domainId).contains(dHost.getDomainId()))) { + HostVO host = _hostDao.findById(hostId); throw new CloudRuntimeException("Host " + host.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); } if (accountId != null) { if 
(dHost.getAccountId().equals(accountId)) { hostsToRelease.add(dHost); } else { - logger.error(String.format("Host %s under this Pod %s is dedicated to different account/domain", host, pod)); + HostVO host = _hostDao.findById(hostId); + logger.error("{} under this {} is dedicated to different account/domain", host, pod); throw new CloudRuntimeException("Host " + host.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); } } else { @@ -366,7 +370,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { } } - checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hosts); + checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hostIds); final Long accountIdFinal = accountId; return Transaction.execute(new TransactionCallback>() { @@ -402,7 +406,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { @ActionEvent(eventType = EventTypes.EVENT_DEDICATE_RESOURCE, eventDescription = "dedicating a Cluster") public List dedicateCluster(final Long clusterId, final Long domainId, final String accountName) { Long accountId = null; - List hosts = null; + List hostIds = null; if (accountName != null) { Account caller = CallContext.current().getCallingAccount(); Account owner = _accountMgr.finalizeOwner(caller, accountName, domainId, null); @@ -448,12 +452,13 @@ public class DedicatedResourceManagerImpl implements DedicatedService { } //check if any resource under this cluster is dedicated to different account or sub-domain - hosts = _hostDao.findByClusterId(cluster.getId()); + hostIds = _hostDao.listIdsByClusterId(cluster.getId()); List hostsToRelease = new ArrayList(); - for (HostVO host : hosts) { - DedicatedResourceVO dHost = _dedicatedDao.findByHostId(host.getId()); + for (Long hostId : hostIds) { + DedicatedResourceVO dHost = _dedicatedDao.findByHostId(hostId); if (dHost != null) { if (!(childDomainIds.contains(dHost.getDomainId()))) { + HostVO host = 
_hostDao.findById(hostId); throw new CloudRuntimeException("Host " + host.getName() + " under this Cluster " + cluster.getName() + " is dedicated to different account/domain"); } @@ -479,7 +484,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { } } - checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hosts); + checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hostIds); final Long accountIdFinal = accountId; return Transaction.execute(new TransactionCallback>() { @@ -576,7 +581,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { List childDomainIds = getDomainChildIds(domainId); childDomainIds.add(domainId); - checkHostSuitabilityForExplicitDedication(accountId, childDomainIds, host); + checkHostSuitabilityForExplicitDedication(accountId, childDomainIds, host.getId()); final Long accountIdFinal = accountId; return Transaction.execute(new TransactionCallback>() { @@ -662,13 +667,14 @@ public class DedicatedResourceManagerImpl implements DedicatedService { return vms; } - private boolean checkHostSuitabilityForExplicitDedication(Long accountId, List domainIds, Host host) { + private boolean checkHostSuitabilityForExplicitDedication(Long accountId, List domainIds, long hostId) { boolean suitable = true; - List allVmsOnHost = getVmsOnHost(host.getId()); + List allVmsOnHost = getVmsOnHost(hostId); if (accountId != null) { for (UserVmVO vm : allVmsOnHost) { if (vm.getAccountId() != accountId) { - logger.info(String.format("Host %s found to be unsuitable for explicit dedication as it is running instances of another account", host)); + Host host = _hostDao.findById(hostId); + logger.info("{} found to be unsuitable for explicit dedication as it is running instances of another account", host); throw new CloudRuntimeException("Host " + host.getUuid() + " found to be unsuitable for explicit dedication as it is " + "running instances of another account"); } @@ -676,7 +682,8 @@ public class 
DedicatedResourceManagerImpl implements DedicatedService { } else { for (UserVmVO vm : allVmsOnHost) { if (!domainIds.contains(vm.getDomainId())) { - logger.info(String.format("Host %s found to be unsuitable for explicit dedication as it is running instances of another domain", host)); + Host host = _hostDao.findById(hostId); + logger.info("{} found to be unsuitable for explicit dedication as it is running instances of another domain", host); throw new CloudRuntimeException("Host " + host.getUuid() + " found to be unsuitable for explicit dedication as it is " + "running instances of another domain"); } @@ -685,10 +692,10 @@ public class DedicatedResourceManagerImpl implements DedicatedService { return suitable; } - private boolean checkHostsSuitabilityForExplicitDedication(Long accountId, List domainIds, List hosts) { + private boolean checkHostsSuitabilityForExplicitDedication(Long accountId, List domainIds, List hostIds) { boolean suitable = true; - for (HostVO host : hosts) { - checkHostSuitabilityForExplicitDedication(accountId, domainIds, host); + for (Long hostId : hostIds) { + checkHostSuitabilityForExplicitDedication(accountId, domainIds, hostId); } return suitable; } diff --git a/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java b/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java index b971b3b8596..f9cde2ae441 100644 --- a/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java +++ b/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java @@ -21,14 +21,15 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.commons.collections.CollectionUtils; import 
com.cloud.configuration.Config; import com.cloud.exception.InsufficientServerCapacityException; -import com.cloud.host.HostVO; import com.cloud.resource.ResourceManager; import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; @@ -38,7 +39,6 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachineProfile; -import org.springframework.util.CollectionUtils; public class ImplicitDedicationPlanner extends FirstFitPlanner implements DeploymentClusterPlanner { @@ -73,12 +73,11 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements Deploy boolean preferred = isServiceOfferingUsingPlannerInPreferredMode(vmProfile.getServiceOfferingId()); // Get the list of all the hosts in the given clusters - List allHosts = new ArrayList(); - for (Long cluster : clusterList) { - List hostsInCluster = resourceMgr.listAllHostsInCluster(cluster); - for (HostVO hostVO : hostsInCluster) { - allHosts.add(hostVO.getId()); - } + List allHosts = new ArrayList<>(); + if (CollectionUtils.isNotEmpty(clusterList)) { + allHosts = clusterList.stream() + .flatMap(cluster -> hostDao.listIdsByClusterId(cluster).stream()) + .collect(Collectors.toList()); } // Go over all the hosts in the cluster and get a list of @@ -224,20 +223,15 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements Deploy } private List getUpdatedClusterList(List clusterList, Set hostsSet) { - List updatedClusterList = new ArrayList(); - for (Long cluster : clusterList) { - List hosts = resourceMgr.listAllHostsInCluster(cluster); - Set hostsInClusterSet = new HashSet(); - for (HostVO host : hosts) { - hostsInClusterSet.add(host.getId()); - } - - if (!hostsSet.containsAll(hostsInClusterSet)) { - updatedClusterList.add(cluster); - } + if (CollectionUtils.isEmpty(clusterList)) { + return new ArrayList<>(); } - - return updatedClusterList; + return clusterList.stream() + 
.filter(cluster -> { + Set hostsInClusterSet = new HashSet<>(hostDao.listIdsByClusterId(cluster)); + return !hostsSet.containsAll(hostsInClusterSet); + }) + .collect(Collectors.toList()); } @Override @@ -257,15 +251,11 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements Deploy Account account = vmProfile.getOwner(); // Get the list of all the hosts in the given clusters - List allHosts = new ArrayList(); - if (!CollectionUtils.isEmpty(clusterList)) { - for (Long cluster : clusterList) { - List hostsInCluster = resourceMgr.listAllHostsInCluster(cluster); - for (HostVO hostVO : hostsInCluster) { - - allHosts.add(hostVO.getId()); - } - } + List allHosts = new ArrayList<>(); + if (CollectionUtils.isNotEmpty(clusterList)) { + allHosts = clusterList.stream() + .flatMap(cluster -> hostDao.listIdsByClusterId(cluster).stream()) + .collect(Collectors.toList()); } // Go over all the hosts in the cluster and get a list of // 1. All empty hosts, not running any vms. diff --git a/plugins/deployment-planners/implicit-dedication/src/test/java/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java b/plugins/deployment-planners/implicit-dedication/src/test/java/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java index e174824cfdd..2d2b4c78261 100644 --- a/plugins/deployment-planners/implicit-dedication/src/test/java/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java +++ b/plugins/deployment-planners/implicit-dedication/src/test/java/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java @@ -16,11 +16,11 @@ // under the License. 
package org.apache.cloudstack.implicitplanner; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.everyItem; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.everyItem; -import static org.hamcrest.Matchers.equalTo; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -36,7 +36,11 @@ import java.util.UUID; import javax.inject.Inject; -import com.cloud.user.User; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.test.utils.SpringUtils; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -54,12 +58,6 @@ import org.springframework.test.context.ContextConfiguration; import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.test.utils.SpringUtils; - import com.cloud.capacity.Capacity; import com.cloud.capacity.CapacityManager; import com.cloud.capacity.dao.CapacityDao; @@ -73,7 +71,6 @@ import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.deploy.ImplicitDedicationPlanner; import com.cloud.exception.InsufficientServerCapacityException; import com.cloud.gpu.dao.HostGpuGroupsDao; -import com.cloud.host.HostVO; 
import com.cloud.host.dao.HostDao; import com.cloud.host.dao.HostDetailsDao; import com.cloud.host.dao.HostTagsDao; @@ -90,6 +87,7 @@ import com.cloud.storage.dao.VolumeDao; import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.AccountVO; +import com.cloud.user.User; import com.cloud.user.UserVO; import com.cloud.utils.Pair; import com.cloud.utils.component.ComponentContext; @@ -387,21 +385,9 @@ public class ImplicitPlannerTest { when(serviceOfferingDetailsDao.listDetailsKeyPairs(offeringId)).thenReturn(details); // Initialize hosts in clusters - HostVO host1 = mock(HostVO.class); - when(host1.getId()).thenReturn(5L); - HostVO host2 = mock(HostVO.class); - when(host2.getId()).thenReturn(6L); - HostVO host3 = mock(HostVO.class); - when(host3.getId()).thenReturn(7L); - List hostsInCluster1 = new ArrayList(); - List hostsInCluster2 = new ArrayList(); - List hostsInCluster3 = new ArrayList(); - hostsInCluster1.add(host1); - hostsInCluster2.add(host2); - hostsInCluster3.add(host3); - when(resourceMgr.listAllHostsInCluster(1)).thenReturn(hostsInCluster1); - when(resourceMgr.listAllHostsInCluster(2)).thenReturn(hostsInCluster2); - when(resourceMgr.listAllHostsInCluster(3)).thenReturn(hostsInCluster3); + when(hostDao.listIdsByClusterId(1L)).thenReturn(List.of(5L)); + when(hostDao.listIdsByClusterId(2L)).thenReturn(List.of(6L)); + when(hostDao.listIdsByClusterId(3L)).thenReturn(List.of(7L)); // Mock vms on each host. 
long offeringIdForVmsOfThisAccount = 15L; diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index ca3395be5dc..2f57ad6ffb0 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -18,6 +18,7 @@ package com.cloud.hypervisor.kvm.resource; import static com.cloud.host.Host.HOST_INSTANCE_CONVERSION; import static com.cloud.host.Host.HOST_VOLUME_ENCRYPTION; +import static org.apache.cloudstack.utils.linux.KVMHostInfo.isHostS390x; import java.io.BufferedReader; import java.io.File; @@ -244,11 +245,16 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv private static final String LEGACY = "legacy"; private static final String SECURE = "secure"; + /** + * Machine type for s390x architecture + */ + private static final String S390X_VIRTIO_DEVICE = "s390-ccw-virtio"; + /** * Machine type. */ - private static final String PC = "pc"; - private static final String VIRT = "virt"; + private static final String PC = isHostS390x() ? S390X_VIRTIO_DEVICE : "pc"; + private static final String VIRT = isHostS390x() ? S390X_VIRTIO_DEVICE : "virt"; /** * Possible devices to add to VM. @@ -305,6 +311,10 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv * Constant that defines ARM64 (aarch64) guest architectures. */ private static final String AARCH64 = "aarch64"; + /** + * Constant that defines IBM Z Arch (s390x) guest architectures. 
+ */ + private static final String S390X = "s390x"; public static final String RESIZE_NOTIFY_ONLY = "NOTIFYONLY"; public static final String BASEPATH = "/usr/share/cloudstack-common/vms/"; @@ -437,7 +447,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv protected static final String LOCAL_STORAGE_PATH = "local.storage.path"; protected static final String LOCAL_STORAGE_UUID = "local.storage.uuid"; - protected static final String DEFAULT_LOCAL_STORAGE_PATH = "/var/lib/libvirt/images/"; + public static final String DEFAULT_LOCAL_STORAGE_PATH = "/var/lib/libvirt/images"; protected List localStoragePaths = new ArrayList<>(); protected List localStorageUUIDs = new ArrayList<>(); @@ -1796,7 +1806,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv "^dummy", "^lo", "^p\\d+p\\d+", - "^vni" + "^vni", + "^enc" }; /** @@ -2642,12 +2653,15 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } devices.addDevice(createChannelDef(vmTO)); - devices.addDevice(createWatchDogDef()); + if (!isGuestS390x()) { + devices.addDevice(createWatchDogDef()); + } devices.addDevice(createVideoDef(vmTO)); devices.addDevice(createConsoleDef()); devices.addDevice(createGraphicDef(vmTO)); - devices.addDevice(createTabletInputDef()); - + if (!isGuestS390x()) { + devices.addDevice(createTabletInputDef()); + } if (isGuestAarch64()) { createArm64UsbDef(devices); } @@ -2661,7 +2675,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv Map details = vmTO.getDetails(); boolean isIothreadsEnabled = details != null && details.containsKey(VmDetailConstants.IOTHREADS); - devices.addDevice(createSCSIDef(vcpus, isIothreadsEnabled)); + addSCSIControllers(devices, vcpus, vmTO.getDisks().length, isIothreadsEnabled); } return devices; } @@ -2699,8 +2713,19 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv * Creates Virtio SCSI controller.
* The respective Virtio SCSI XML definition is generated only if the VM's Disk Bus is of ISCSI. */ - protected SCSIDef createSCSIDef(int vcpus, boolean isIothreadsEnabled) { - return new SCSIDef((short)0, 0, 0, 9, 0, vcpus, isIothreadsEnabled); + protected SCSIDef createSCSIDef(short index, int vcpus, boolean isIothreadsEnabled) { + return new SCSIDef(index, 0, 0, 9 + index, 0, vcpus, isIothreadsEnabled); + } + + + private void addSCSIControllers(DevicesDef devices, int vcpus, int diskCount, boolean isIothreadsEnabled) { + int controllers = diskCount / 7; + if (diskCount % 7 != 0) { + controllers++; + } + for (int i = 0; i < controllers; i++) { + devices.addDevice(createSCSIDef((short)i, vcpus, isIothreadsEnabled)); + } } protected ConsoleDef createConsoleDef() { @@ -2754,7 +2779,9 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv FeaturesDef features = new FeaturesDef(); features.addFeatures(PAE); features.addFeatures(APIC); - features.addFeatures(ACPI); + if (!isHostS390x()) { + features.addFeatures(ACPI); + } if (isUefiEnabled && isSecureBoot) { features.addFeatures(SMM); } @@ -2846,6 +2873,10 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv return AARCH64.equals(guestCpuArch); } + private boolean isGuestS390x() { + return S390X.equals(guestCpuArch); + } + /** * Creates a guest definition from a VM specification. */ @@ -2856,7 +2887,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv guest.setManufacturer(vmTO.getMetadataManufacturer()); guest.setProduct(vmTO.getMetadataProductName()); guest.setGuestArch(guestCpuArch != null ? guestCpuArch : vmTO.getArch()); - guest.setMachineType(isGuestAarch64() ? VIRT : PC); + guest.setMachineType((isGuestAarch64() || isGuestS390x()) ? 
VIRT : PC); guest.setBootType(GuestDef.BootType.BIOS); if (MapUtils.isNotEmpty(customParams)) { if (customParams.containsKey(GuestDef.BootType.UEFI.toString())) { @@ -2870,7 +2901,9 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv guest.setIothreads(customParams.containsKey(VmDetailConstants.IOTHREADS)); } guest.setUuid(uuid); - guest.setBootOrder(GuestDef.BootOrder.CDROM); + if(!isGuestS390x()) { + guest.setBootOrder(GuestDef.BootOrder.CDROM); + } guest.setBootOrder(GuestDef.BootOrder.HARDISK); return guest; } @@ -3111,7 +3144,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv final DiskDef.DiskType diskType = getDiskType(physicalDisk); disk.defISODisk(volPath, devId, isUefiEnabled, diskType); - if (guestCpuArch != null && guestCpuArch.equals("aarch64")) { + if (guestCpuArch != null && (guestCpuArch.equals("aarch64") || guestCpuArch.equals("s390x"))) { disk.setBusType(DiskDef.DiskBus.SCSI); } } else { @@ -3209,7 +3242,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv if (vmSpec.getType() != VirtualMachine.Type.User) { final DiskDef iso = new DiskDef(); iso.defISODisk(sysvmISOPath, DiskDef.DiskType.FILE); - if (guestCpuArch != null && guestCpuArch.equals("aarch64")) { + if (guestCpuArch != null && (guestCpuArch.equals("aarch64") || guestCpuArch.equals("s390x"))) { iso.setBusType(DiskDef.DiskBus.SCSI); } vm.getDevices().addDevice(iso); @@ -4283,7 +4316,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv return DiskDef.DiskBus.VIRTIO; } else if (isUefiEnabled && StringUtils.startsWithAny(platformEmulator, "Windows", "Other")) { return DiskDef.DiskBus.SATA; - } else if (guestCpuArch != null && guestCpuArch.equals("aarch64")) { + } else if (guestCpuArch != null && (guestCpuArch.equals("aarch64") || guestCpuArch.equals("s390x"))) { return DiskDef.DiskBus.SCSI; } else { return DiskDef.DiskBus.IDE; diff --git 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java index a67294ecadb..82617696954 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java @@ -248,7 +248,9 @@ public class LibvirtVMDef { guestDef.append("\n"); } } - guestDef.append("\n"); + if (!(_arch != null && _arch.equals("s390x"))) { + guestDef.append("\n"); + } guestDef.append("\n"); if (iothreads) { guestDef.append(String.format("%s", NUMBER_OF_IOTHREADS)); @@ -580,7 +582,7 @@ public class LibvirtVMDef { } } - if (_emulator != null && _emulator.endsWith("aarch64")) { + if (_emulator != null && (_emulator.endsWith("aarch64") || _emulator.endsWith("s390x"))) { devicesBuilder.append("\n"); for (int i = 0; i < 32; i++) { devicesBuilder.append("\n"); @@ -1652,7 +1654,7 @@ public class LibvirtVMDef { if (_scriptPath != null) { netBuilder.append(" diff --git a/ui/src/views/network/AclListRulesTab.vue b/ui/src/views/network/AclListRulesTab.vue index 5303a97aefb..e01e61289e5 100644 --- a/ui/src/views/network/AclListRulesTab.vue +++ b/ui/src/views/network/AclListRulesTab.vue @@ -781,7 +781,7 @@ export default { const csvData = this.csv({ data: this.acls }) const hiddenElement = document.createElement('a') - hiddenElement.href = 'data:text/csv;charset=utf-8,' + encodeURI(csvData) + hiddenElement.href = 'data:text/csv;charset=utf-8,' + encodeURIComponent(csvData) hiddenElement.target = '_blank' hiddenElement.download = 'AclRules-' + this.resource.name + '-' + this.resource.id + '.csv' hiddenElement.click() diff --git a/ui/src/views/network/FirewallRules.vue b/ui/src/views/network/FirewallRules.vue index 43ee9536be5..a0ecb2042ae 100644 --- a/ui/src/views/network/FirewallRules.vue +++ b/ui/src/views/network/FirewallRules.vue @@ -457,6 +457,9 @@ export 
default { addRule () { if (this.loading) return this.loading = true + if (this.newRule.cidrlist == null || this.newRule.cidrlist.trim?.() === '') { + delete this.newRule.cidrlist + } api('createFirewallRule', { ...this.newRule }).then(response => { this.$pollJob({ jobId: response.createfirewallruleresponse.jobid, diff --git a/ui/src/views/network/VpcTiersTab.vue b/ui/src/views/network/VpcTiersTab.vue index e9eaddf149e..5d985ae7688 100644 --- a/ui/src/views/network/VpcTiersTab.vue +++ b/ui/src/views/network/VpcTiersTab.vue @@ -213,7 +213,7 @@ @change="updateMtu()"/>
- + diff --git a/ui/src/views/setting/ConfigurationValue.vue b/ui/src/views/setting/ConfigurationValue.vue index 4ae0244b746..23580bbfb83 100644 --- a/ui/src/views/setting/ConfigurationValue.vue +++ b/ui/src/views/setting/ConfigurationValue.vue @@ -39,6 +39,7 @@ @keydown.esc="editableValueKey = null" @pressEnter="updateConfigurationValue(configrecord)" @change="value => setConfigurationEditable(configrecord, value)" + @keydown="e => handleInputNumberKeyDown(e, false)" /> @@ -52,12 +53,13 @@ @keydown.esc="editableValueKey = null" @pressEnter="updateConfigurationValue(configrecord)" @change="value => setConfigurationEditable(configrecord, value)" + @keydown="e => handleInputNumberKeyDown(e, true)" /> - - - + + + - + @@ -365,6 +368,26 @@ export default { } else { this.editableValueKey = null } + }, + handleInputNumberKeyDown (event, isDecimal) { + const allowedCodes = ['Backspace', 'Delete', 'ArrowLeft', 'ArrowRight', 'Minus'] + + if (isDecimal) { + allowedCodes.push('Period') + } + + if ( + event.getModifierState('Control') || + event.getModifierState('Meta') || + event.getModifierState('Alt') + ) { + return + } + + const isValid = allowedCodes.includes(event.code) || !isNaN(event.key) + if (!isValid) { + event.preventDefault() + } } } } diff --git a/utils/src/main/java/com/cloud/utils/backoff/impl/ConstantTimeBackoff.java b/utils/src/main/java/com/cloud/utils/backoff/impl/ConstantTimeBackoff.java index ac21036c59e..043c77a9a0d 100644 --- a/utils/src/main/java/com/cloud/utils/backoff/impl/ConstantTimeBackoff.java +++ b/utils/src/main/java/com/cloud/utils/backoff/impl/ConstantTimeBackoff.java @@ -54,7 +54,6 @@ public class ConstantTimeBackoff extends AdapterBase implements BackoffAlgorithm } finally { _asleep.remove(current.getName()); } - return; } @Override diff --git a/utils/src/main/java/com/cloud/utils/nio/HandlerFactory.java b/utils/src/main/java/com/cloud/utils/nio/HandlerFactory.java index f6b9c82d80b..6f0f1945e01 100644 --- 
a/utils/src/main/java/com/cloud/utils/nio/HandlerFactory.java +++ b/utils/src/main/java/com/cloud/utils/nio/HandlerFactory.java @@ -19,10 +19,19 @@ package com.cloud.utils.nio; +import java.net.SocketAddress; + /** * WorkerFactory creates and selects workers. */ public interface HandlerFactory { public Task create(Task.Type type, Link link, byte[] data); - + default int getMaxConcurrentNewConnectionsCount() { + return 0; + } + default int getNewConnectionsCount() { + return 0; + } + default void registerNewConnection(SocketAddress address) {} + default void unregisterNewConnection(SocketAddress address) {} } diff --git a/utils/src/main/java/com/cloud/utils/nio/Link.java b/utils/src/main/java/com/cloud/utils/nio/Link.java index 71d881a94ae..5404cd15343 100644 --- a/utils/src/main/java/com/cloud/utils/nio/Link.java +++ b/utils/src/main/java/com/cloud/utils/nio/Link.java @@ -48,8 +48,9 @@ import javax.net.ssl.TrustManagerFactory; import org.apache.cloudstack.framework.ca.CAService; import org.apache.cloudstack.utils.security.KeyStoreUtils; import org.apache.cloudstack.utils.security.SSLUtils; -import org.apache.logging.log4j.Logger; +import org.apache.commons.lang3.ObjectUtils; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import com.cloud.utils.PropertiesUtil; import com.cloud.utils.exception.CloudRuntimeException; @@ -592,6 +593,10 @@ public class Link { } public static boolean doHandshake(final SocketChannel socketChannel, final SSLEngine sslEngine) throws IOException { + return doHandshake(socketChannel, sslEngine, null); + } + + public static boolean doHandshake(final SocketChannel socketChannel, final SSLEngine sslEngine, Integer timeout) throws IOException { if (socketChannel == null || sslEngine == null) { return false; } @@ -606,12 +611,15 @@ public class Link { final long startTimeMills = System.currentTimeMillis(); HandshakeStatus handshakeStatus = sslEngine.getHandshakeStatus(); + long timeoutMillis = 
ObjectUtils.defaultIfNull(timeout, 30) * 1000L; while (handshakeStatus != SSLEngineResult.HandshakeStatus.FINISHED && handshakeStatus != SSLEngineResult.HandshakeStatus.NOT_HANDSHAKING) { final long timeTaken = System.currentTimeMillis() - startTimeMills; - if (timeTaken > 30000L) { - LOGGER.warn("SSL Handshake has taken more than 30s to connect to: " + socketChannel.getRemoteAddress() + - ". Please investigate this connection."); + + if (timeTaken > timeoutMillis) { + LOGGER.warn("SSL Handshake has taken more than {}ms to connect to: {}" + + " while status: {}. Please investigate this connection.", timeoutMillis, socketChannel.getRemoteAddress(), + handshakeStatus); return false; } switch (handshakeStatus) { diff --git a/utils/src/main/java/com/cloud/utils/nio/NioClient.java b/utils/src/main/java/com/cloud/utils/nio/NioClient.java index 89f51399e26..46d67feaaf3 100644 --- a/utils/src/main/java/com/cloud/utils/nio/NioClient.java +++ b/utils/src/main/java/com/cloud/utils/nio/NioClient.java @@ -33,73 +33,85 @@ import org.apache.cloudstack.utils.security.SSLUtils; public class NioClient extends NioConnection { - protected String _host; - protected SocketChannel _clientConnection; + protected String host; + protected SocketChannel clientConnection; - public NioClient(final String name, final String host, final int port, final int workers, final HandlerFactory factory) { + public NioClient(final String name, final String host, final int port, final int workers, + final Integer sslHandshakeTimeout, final HandlerFactory factory) { super(name, port, workers, factory); - _host = host; + setSslHandshakeTimeout(sslHandshakeTimeout); + this.host = host; + } + + protected void closeChannel() { + try { + if (clientConnection != null && clientConnection.isOpen()) { + clientConnection.close(); + } + } catch (IOException e) { + logger.error("Failed to close SocketChannel", e); + } } @Override protected void init() throws IOException { - _selector = Selector.open(); - Task task = null; - + Task 
task; + String hostLog = host + ":" + _port; try { - _clientConnection = SocketChannel.open(); - - logger.info("Connecting to " + _host + ":" + _port); - final InetSocketAddress peerAddr = new InetSocketAddress(_host, _port); - _clientConnection.connect(peerAddr); - _clientConnection.configureBlocking(false); + logger.info("Connecting to {}", hostLog); + _selector = Selector.open(); + clientConnection = SocketChannel.open(); + final InetSocketAddress serverAddress = new InetSocketAddress(host, _port); + clientConnection.connect(serverAddress); + logger.info("Connected to {}", hostLog); + clientConnection.configureBlocking(false); final SSLContext sslContext = Link.initClientSSLContext(); - SSLEngine sslEngine = sslContext.createSSLEngine(_host, _port); + SSLEngine sslEngine = sslContext.createSSLEngine(host, _port); sslEngine.setUseClientMode(true); sslEngine.setEnabledProtocols(SSLUtils.getSupportedProtocols(sslEngine.getEnabledProtocols())); sslEngine.beginHandshake(); - if (!Link.doHandshake(_clientConnection, sslEngine)) { - logger.error("SSL Handshake failed while connecting to host: " + _host + " port: " + _port); - _selector.close(); - throw new IOException("SSL Handshake failed while connecting to host: " + _host + " port: " + _port); + if (!Link.doHandshake(clientConnection, sslEngine, getSslHandshakeTimeout())) { + throw new IOException(String.format("SSL Handshake failed while connecting to host: %s", hostLog)); } logger.info("SSL: Handshake done"); - logger.info("Connected to " + _host + ":" + _port); - final Link link = new Link(peerAddr, this); + final Link link = new Link(serverAddress, this); link.setSSLEngine(sslEngine); - final SelectionKey key = _clientConnection.register(_selector, SelectionKey.OP_READ); + final SelectionKey key = clientConnection.register(_selector, SelectionKey.OP_READ); link.setKey(key); key.attach(link); // Notice we've already connected due to the handshake, so let's get the // remaining task done task = 
_factory.create(Task.Type.CONNECT, link, null); } catch (final GeneralSecurityException e) { - _selector.close(); + closeChannel(); throw new IOException("Failed to initialise security", e); } catch (final IOException e) { - _selector.close(); + closeChannel(); + logger.error("IOException while connecting to {}", hostLog, e); throw e; } - _executor.submit(task); + if (task != null) { + _executor.submit(task); + } } @Override - protected void registerLink(final InetSocketAddress saddr, final Link link) { + protected void registerLink(final InetSocketAddress address, final Link link) { // don't do anything. } @Override - protected void unregisterLink(final InetSocketAddress saddr) { + protected void unregisterLink(final InetSocketAddress address) { // don't do anything. } @Override public void cleanUp() throws IOException { super.cleanUp(); - if (_clientConnection != null) { - _clientConnection.close(); + if (clientConnection != null && clientConnection.isOpen()) { + clientConnection.close(); } logger.info("NioClient connection closed"); } diff --git a/utils/src/main/java/com/cloud/utils/nio/NioConnection.java b/utils/src/main/java/com/cloud/utils/nio/NioConnection.java index 6be42a2ee33..98fa69716cd 100644 --- a/utils/src/main/java/com/cloud/utils/nio/NioConnection.java +++ b/utils/src/main/java/com/cloud/utils/nio/NioConnection.java @@ -32,7 +32,9 @@ import java.nio.channels.SelectionKey; import java.nio.channels.Selector; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; +import java.security.GeneralSecurityException; import java.util.ArrayList; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Set; @@ -41,6 +43,8 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.SynchronousQueue; 
import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; @@ -48,8 +52,8 @@ import javax.net.ssl.SSLEngine; import org.apache.cloudstack.framework.ca.CAService; import org.apache.cloudstack.utils.security.SSLUtils; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.exception.NioConnectionException; @@ -60,6 +64,7 @@ import com.cloud.utils.exception.NioConnectionException; */ public abstract class NioConnection implements Callable { protected Logger logger = LogManager.getLogger(getClass()); + public static final String SERVER_BUSY_MESSAGE = "Server is busy."; protected Selector _selector; protected ExecutorService _threadExecutor; @@ -68,21 +73,36 @@ public abstract class NioConnection implements Callable { protected boolean _isRunning; protected boolean _isStartup; protected int _port; + protected int _workers; protected List _todos; protected HandlerFactory _factory; protected String _name; protected ExecutorService _executor; protected ExecutorService _sslHandshakeExecutor; protected CAService caService; + protected Set socketChannels = new HashSet<>(); + protected Integer sslHandshakeTimeout = null; + private final int factoryMaxNewConnectionsCount; public NioConnection(final String name, final int port, final int workers, final HandlerFactory factory) { _name = name; _isRunning = false; _selector = null; _port = port; + _workers = workers; _factory = factory; - _executor = new ThreadPoolExecutor(workers, 5 * workers, 1, TimeUnit.DAYS, new LinkedBlockingQueue(), new NamedThreadFactory(name + "-Handler")); - _sslHandshakeExecutor = Executors.newCachedThreadPool(new NamedThreadFactory(name + "-SSLHandshakeHandler")); + this.factoryMaxNewConnectionsCount = factory.getMaxConcurrentNewConnectionsCount(); + _executor = new ThreadPoolExecutor(workers, 5 * workers, 1, TimeUnit.DAYS, + new 
LinkedBlockingQueue<>(5 * workers), new NamedThreadFactory(name + "-Handler"), + new ThreadPoolExecutor.AbortPolicy()); + String sslHandshakeHandlerName = name + "-SSLHandshakeHandler"; + if (factoryMaxNewConnectionsCount > 0) { + _sslHandshakeExecutor = new ThreadPoolExecutor(0, this.factoryMaxNewConnectionsCount, 30, + TimeUnit.MINUTES, new SynchronousQueue<>(), new NamedThreadFactory(sslHandshakeHandlerName), + new ThreadPoolExecutor.AbortPolicy()); + } else { + _sslHandshakeExecutor = Executors.newCachedThreadPool(new NamedThreadFactory(sslHandshakeHandlerName)); + } } public void setCAService(final CAService caService) { @@ -90,13 +110,13 @@ public abstract class NioConnection implements Callable { } public void start() throws NioConnectionException { - _todos = new ArrayList(); + _todos = new ArrayList<>(); try { init(); } catch (final ConnectException e) { - logger.warn("Unable to connect to remote: is there a server running on port " + _port); - return; + logger.warn("Unable to connect to remote: is there a server running on port {}?", _port, e); + throw new NioConnectionException(e.getMessage(), e); } catch (final IOException e) { logger.error("Unable to initialize the threads.", e); throw new NioConnectionException(e.getMessage(), e); @@ -106,6 +126,9 @@ public abstract class NioConnection implements Callable { } _isStartup = true; + if (_executor.isShutdown()) { + _executor = new ThreadPoolExecutor(_workers, 5 * _workers, 1, TimeUnit.DAYS, new LinkedBlockingQueue<>(), new NamedThreadFactory(_name + "-Handler")); + } _threadExecutor = Executors.newSingleThreadExecutor(new NamedThreadFactory(this._name + "-NioConnectionHandler")); _isRunning = true; _futureTask = _threadExecutor.submit(this); @@ -113,6 +136,7 @@ public abstract class NioConnection implements Callable { public void stop() { _executor.shutdown(); + _sslHandshakeExecutor.shutdown(); _isRunning = false; if (_threadExecutor != null) { _futureTask.cancel(false); @@ -138,18 +162,14 @@ public 
abstract class NioConnection implements Callable { final Set readyKeys = _selector.selectedKeys(); final Iterator i = readyKeys.iterator(); - if (logger.isTraceEnabled()) { - logger.trace("Keys Processing: " + readyKeys.size()); - } + logger.trace("Keys Processing: {}", readyKeys.size()); // Walk through the ready keys collection. while (i.hasNext()) { final SelectionKey sk = i.next(); i.remove(); if (!sk.isValid()) { - if (logger.isTraceEnabled()) { - logger.trace("Selection Key is invalid: " + sk.toString()); - } + logger.trace("Selection Key is invalid: {}", sk); final Link link = (Link)sk.attachment(); if (link != null) { link.terminated(); @@ -190,73 +210,80 @@ public abstract class NioConnection implements Callable { abstract void unregisterLink(InetSocketAddress saddr); + protected boolean rejectConnectionIfBusy(final SocketChannel socketChannel) throws IOException { + if (factoryMaxNewConnectionsCount <= 0 || _factory.getNewConnectionsCount() < factoryMaxNewConnectionsCount) { + return false; + } + // Reject new connection if the server is busy + logger.warn("{} Rejecting new connection. 
{} active connections currently", + SERVER_BUSY_MESSAGE, factoryMaxNewConnectionsCount); + socketChannel.close(); + _selector.wakeup(); + return true; + } + + protected void accept(final SelectionKey key) throws IOException { final ServerSocketChannel serverSocketChannel = (ServerSocketChannel)key.channel(); final SocketChannel socketChannel = serverSocketChannel.accept(); + if (rejectConnectionIfBusy(socketChannel)) { + return; + } socketChannel.configureBlocking(false); final Socket socket = socketChannel.socket(); socket.setKeepAlive(true); - if (logger.isTraceEnabled()) { - logger.trace("Connection accepted for " + socket); - } + logger.trace("Connection accepted for {}", socket); - final SSLEngine sslEngine; try { - sslEngine = Link.initServerSSLEngine(caService, socketChannel.getRemoteAddress().toString()); - sslEngine.setUseClientMode(false); - sslEngine.setEnabledProtocols(SSLUtils.getSupportedProtocols(sslEngine.getEnabledProtocols())); final NioConnection nioConnection = this; - _sslHandshakeExecutor.submit(new Runnable() { - @Override - public void run() { - _selector.wakeup(); - try { - sslEngine.beginHandshake(); - if (!Link.doHandshake(socketChannel, sslEngine)) { - throw new IOException("SSL handshake timed out with " + socketChannel.getRemoteAddress()); - } - if (logger.isTraceEnabled()) { - logger.trace("SSL: Handshake done"); - } - final InetSocketAddress saddr = (InetSocketAddress)socket.getRemoteSocketAddress(); - final Link link = new Link(saddr, nioConnection); - link.setSSLEngine(sslEngine); - link.setKey(socketChannel.register(key.selector(), SelectionKey.OP_READ, link)); - final Task task = _factory.create(Task.Type.CONNECT, link, null); - registerLink(saddr, link); - _executor.submit(task); - } catch (IOException e) { - if (logger.isTraceEnabled()) { - logger.trace("Connection closed due to failure: " + e.getMessage()); - } - closeAutoCloseable(socket, "accepting socket"); - closeAutoCloseable(socketChannel, "accepting socketChannel"); - } 
finally { - _selector.wakeup(); + _sslHandshakeExecutor.submit(() -> { + final InetSocketAddress socketAddress = (InetSocketAddress)socket.getRemoteSocketAddress(); + _factory.registerNewConnection(socketAddress); + _selector.wakeup(); + try { + final SSLEngine sslEngine = Link.initServerSSLEngine(caService, socketChannel.getRemoteAddress().toString()); + sslEngine.setUseClientMode(false); + sslEngine.setEnabledProtocols(SSLUtils.getSupportedProtocols(sslEngine.getEnabledProtocols())); + sslEngine.beginHandshake(); + if (!Link.doHandshake(socketChannel, sslEngine, getSslHandshakeTimeout())) { + throw new IOException("SSL handshake timed out with " + socketAddress); } + logger.trace("SSL: Handshake done"); + final Link link = new Link(socketAddress, nioConnection); + link.setSSLEngine(sslEngine); + link.setKey(socketChannel.register(key.selector(), SelectionKey.OP_READ, link)); + final Task task = _factory.create(Task.Type.CONNECT, link, null); + registerLink(socketAddress, link); + _executor.submit(task); + } catch (final GeneralSecurityException | IOException e) { + _factory.unregisterNewConnection(socketAddress); + logger.trace("Connection closed with {} due to failure: {}", socket.getRemoteSocketAddress(), e.getMessage()); + closeAutoCloseable(socket, "accepting socket"); + closeAutoCloseable(socketChannel, "accepting socketChannel"); + } finally { + _selector.wakeup(); } }); - } catch (final Exception e) { - if (logger.isTraceEnabled()) { - logger.trace("Connection closed due to failure: " + e.getMessage()); - } - closeAutoCloseable(socket, "accepting socket"); - closeAutoCloseable(socketChannel, "accepting socketChannel"); + } catch (final RejectedExecutionException e) { + logger.trace("{} Accept Task rejected: {}", socket.getRemoteSocketAddress(), e.getMessage()); + closeAutoCloseable(socket, "Rejecting connection - accepting socket"); + closeAutoCloseable(socketChannel, "Rejecting connection - accepting socketChannel"); } finally { _selector.wakeup(); } } - 
protected void terminate(final SelectionKey key) { + protected void terminate(final SelectionKey key, String msg) { final Link link = (Link)key.attachment(); closeConnection(key); if (link != null) { + logger.trace("Will terminate connection due to: {}", msg); link.terminated(); final Task task = _factory.create(Task.Type.DISCONNECT, link, null); unregisterLink(link.getSocketAddress()); - + _factory.unregisterNewConnection(link.getSocketAddress()); try { _executor.submit(task); } catch (final Exception e) { @@ -269,14 +296,10 @@ public abstract class NioConnection implements Callable { final Link link = (Link)key.attachment(); try { final SocketChannel socketChannel = (SocketChannel)key.channel(); - if (logger.isTraceEnabled()) { - logger.trace("Reading from: " + socketChannel.socket().toString()); - } + logger.trace("Reading from: {}", socketChannel.socket().toString()); final byte[] data = link.read(socketChannel); if (data == null) { - if (logger.isTraceEnabled()) { - logger.trace("Packet is incomplete. Waiting for more."); - } + logger.trace("Packet is incomplete. Waiting for more."); return; } final Task task = _factory.create(Task.Type.DATA, link, data); @@ -288,7 +311,7 @@ public abstract class NioConnection implements Callable { } } catch (final Exception e) { logDebug(e, key, 1); - terminate(key); + terminate(key, e.getMessage()); } } @@ -322,18 +345,17 @@ public abstract class NioConnection implements Callable { protected void processTodos() { List todos; - if (_todos.size() == 0) { + if (_todos.isEmpty()) { return; // Nothing to do. 
} synchronized (this) { todos = _todos; - _todos = new ArrayList(); + _todos = new ArrayList<>(); } - if (logger.isTraceEnabled()) { - logger.trace("Todos Processing: " + todos.size()); - } + logger.trace("Todos Processing: {}", todos.size()); + SelectionKey key; for (final ChangeRequest todo : todos) { switch (todo.type) { @@ -360,7 +382,7 @@ public abstract class NioConnection implements Callable { link.setKey(key); } } catch (final ClosedChannelException e) { - logger.warn("Couldn't register socket: " + todo.key); + logger.warn("Couldn't register socket: {}", todo.key); try { ((SocketChannel)todo.key).close(); } catch (final IOException ignore) { @@ -372,9 +394,7 @@ public abstract class NioConnection implements Callable { } break; case ChangeRequest.CLOSE: - if (logger.isTraceEnabled()) { - logger.trace("Trying to close " + todo.key); - } + logger.trace("Trying to close {}", todo.key); key = (SelectionKey)todo.key; closeConnection(key); if (key != null) { @@ -402,9 +422,7 @@ public abstract class NioConnection implements Callable { if (!socket.getKeepAlive()) { socket.setKeepAlive(true); } - if (logger.isDebugEnabled()) { - logger.debug("Connected to " + socket); - } + logger.debug("Connected to {}", socket); final Link link = new Link((InetSocketAddress)socket.getRemoteSocketAddress(), this); link.setKey(key); key.attach(link); @@ -413,11 +431,11 @@ public abstract class NioConnection implements Callable { try { _executor.submit(task); } catch (final Exception e) { - logger.warn("Exception occurred when submitting the task", e); + logger.warn("Exception occurred when submitting the task for connect: {}", socket, e); } } catch (final IOException e) { logTrace(e, key, 2); - terminate(key); + terminate(key, e.getMessage()); } } @@ -432,9 +450,7 @@ public abstract class NioConnection implements Callable { protected void write(final SelectionKey key) throws IOException { final Link link = (Link)key.attachment(); try { - if (logger.isTraceEnabled()) { - 
logger.trace("Writing to " + link.getSocketAddress().toString()); - } + logger.trace("Writing to {}", link.getSocketAddress().toString()); final boolean close = link.write((SocketChannel)key.channel()); if (close) { closeConnection(key); @@ -444,7 +460,7 @@ public abstract class NioConnection implements Callable { } } catch (final Exception e) { logDebug(e, key, 3); - terminate(key); + terminate(key, e.getMessage()); } } @@ -454,9 +470,7 @@ public abstract class NioConnection implements Callable { key.cancel(); try { if (channel != null) { - if (logger.isDebugEnabled()) { - logger.debug("Closing socket " + channel.socket()); - } + logger.debug("Closing socket {}", channel.socket()); channel.close(); } } catch (final IOException ignore) { @@ -491,6 +505,16 @@ public abstract class NioConnection implements Callable { /* Release the resource used by the instance */ public void cleanUp() throws IOException { + for (SocketChannel channel : socketChannels) { + if (channel != null && channel.isOpen()) { + try { + logger.info("Closing connection: {}", channel.getRemoteAddress()); + channel.close(); + } catch (IOException e) { + logger.warn("Unable to close connection due to {}", e.getMessage()); + } + } + } if (_selector != null) { _selector.close(); } @@ -513,4 +537,12 @@ public abstract class NioConnection implements Callable { this.att = att; } } + + public Integer getSslHandshakeTimeout() { + return sslHandshakeTimeout; + } + + public void setSslHandshakeTimeout(Integer sslHandshakeTimeout) { + this.sslHandshakeTimeout = sslHandshakeTimeout; + } } diff --git a/utils/src/main/java/com/cloud/utils/nio/NioServer.java b/utils/src/main/java/com/cloud/utils/nio/NioServer.java index dfc42b5fd21..fd5af516bad 100644 --- a/utils/src/main/java/com/cloud/utils/nio/NioServer.java +++ b/utils/src/main/java/com/cloud/utils/nio/NioServer.java @@ -31,54 +31,56 @@ import org.apache.cloudstack.framework.ca.CAService; public class NioServer extends NioConnection { - protected 
InetSocketAddress _localAddr; - private ServerSocketChannel _serverSocket; + protected InetSocketAddress localAddress; + private ServerSocketChannel serverSocket; - protected WeakHashMap _links; + protected WeakHashMap links; - public NioServer(final String name, final int port, final int workers, final HandlerFactory factory, final CAService caService) { - super(name, port, workers, factory); + public NioServer(final String name, final int port, final int workers, final HandlerFactory factory, + final CAService caService, final Integer sslHandShakeTimeout) { + super(name, port, workers,factory); setCAService(caService); - _localAddr = null; - _links = new WeakHashMap(1024); + setSslHandshakeTimeout(sslHandShakeTimeout); + localAddress = null; + links = new WeakHashMap<>(1024); } public int getPort() { - return _serverSocket.socket().getLocalPort(); + return serverSocket.socket().getLocalPort(); } @Override protected void init() throws IOException { _selector = SelectorProvider.provider().openSelector(); - _serverSocket = ServerSocketChannel.open(); - _serverSocket.configureBlocking(false); + serverSocket = ServerSocketChannel.open(); + serverSocket.configureBlocking(false); - _localAddr = new InetSocketAddress(_port); - _serverSocket.socket().bind(_localAddr); + localAddress = new InetSocketAddress(_port); + serverSocket.socket().bind(localAddress); - _serverSocket.register(_selector, SelectionKey.OP_ACCEPT, null); + serverSocket.register(_selector, SelectionKey.OP_ACCEPT, null); - logger.info("NioServer started and listening on " + _serverSocket.socket().getLocalSocketAddress()); + logger.info("NioServer started and listening on {}", serverSocket.socket().getLocalSocketAddress()); } @Override public void cleanUp() throws IOException { super.cleanUp(); - if (_serverSocket != null) { - _serverSocket.close(); + if (serverSocket != null && serverSocket.isOpen()) { + serverSocket.close(); } - logger.info("NioConnection stopped on " + _localAddr.toString()); + 
logger.info("NioConnection stopped on {}", localAddress.toString()); } @Override protected void registerLink(final InetSocketAddress addr, final Link link) { - _links.put(addr, link); + links.put(addr, link); } @Override protected void unregisterLink(final InetSocketAddress saddr) { - _links.remove(saddr); + links.remove(saddr); } /** @@ -91,7 +93,7 @@ public class NioServer extends NioConnection { * @return null if not sent. attach object in link if sent. */ public Object send(final InetSocketAddress saddr, final byte[] data) throws ClosedChannelException { - final Link link = _links.get(saddr); + final Link link = links.get(saddr); if (link == null) { return null; } diff --git a/utils/src/main/java/org/apache/cloudstack/utils/cache/LazyCache.java b/utils/src/main/java/org/apache/cloudstack/utils/cache/LazyCache.java new file mode 100644 index 00000000000..0b4c91e24b3 --- /dev/null +++ b/utils/src/main/java/org/apache/cloudstack/utils/cache/LazyCache.java @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.utils.cache; + +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +import com.github.benmanes.caffeine.cache.Caffeine; +import com.github.benmanes.caffeine.cache.LoadingCache; + +public class LazyCache { + + private final LoadingCache cache; + + public LazyCache(long maximumSize, long expireAfterWriteSeconds, Function loader) { + this.cache = Caffeine.newBuilder() + .maximumSize(maximumSize) + .expireAfterWrite(expireAfterWriteSeconds, TimeUnit.SECONDS) + .build(loader::apply); + } + + public V get(K key) { + return cache.get(key); + } + + public void invalidate(K key) { + cache.invalidate(key); + } + + public void clear() { + cache.invalidateAll(); + } +} diff --git a/utils/src/main/java/org/apache/cloudstack/utils/cache/SingleCache.java b/utils/src/main/java/org/apache/cloudstack/utils/cache/SingleCache.java new file mode 100644 index 00000000000..5fa77d9a28c --- /dev/null +++ b/utils/src/main/java/org/apache/cloudstack/utils/cache/SingleCache.java @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.utils.cache; + +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; + +import com.github.benmanes.caffeine.cache.Caffeine; +import com.github.benmanes.caffeine.cache.LoadingCache; + +public class SingleCache { + + private final LoadingCache cache; + + public SingleCache(long expireAfterWriteSeconds, Supplier loader) { + this.cache = Caffeine.newBuilder() + .maximumSize(1) + .expireAfterWrite(expireAfterWriteSeconds, TimeUnit.SECONDS) + .build(key -> loader.get()); + } + + public V get() { + return cache.get(0); + } + + public void invalidate() { + cache.invalidate(0); + } + + public void clear() { + cache.invalidateAll(); + } +} diff --git a/utils/src/test/java/com/cloud/utils/testcase/NioTest.java b/utils/src/test/java/com/cloud/utils/testcase/NioTest.java index a08814876f9..7d605e9f92b 100644 --- a/utils/src/test/java/com/cloud/utils/testcase/NioTest.java +++ b/utils/src/test/java/com/cloud/utils/testcase/NioTest.java @@ -19,21 +19,6 @@ package com.cloud.utils.testcase; -import com.cloud.utils.concurrency.NamedThreadFactory; -import com.cloud.utils.exception.NioConnectionException; -import com.cloud.utils.nio.HandlerFactory; -import com.cloud.utils.nio.Link; -import com.cloud.utils.nio.NioClient; -import com.cloud.utils.nio.NioServer; -import com.cloud.utils.nio.Task; -import com.cloud.utils.nio.Task.Type; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - import java.io.IOException; import java.net.InetSocketAddress; import java.nio.channels.ClosedChannelException; @@ -45,6 +30,22 @@ import java.util.Random; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import 
org.junit.Test; + +import com.cloud.utils.concurrency.NamedThreadFactory; +import com.cloud.utils.exception.NioConnectionException; +import com.cloud.utils.nio.HandlerFactory; +import com.cloud.utils.nio.Link; +import com.cloud.utils.nio.NioClient; +import com.cloud.utils.nio.NioServer; +import com.cloud.utils.nio.Task; +import com.cloud.utils.nio.Task.Type; + /** * NioTest demonstrates that NioServer can function without getting its main IO * loop blocked when an aggressive or malicious client connects to the server but @@ -99,7 +100,7 @@ public class NioTest { testBytes = new byte[1000000]; randomGenerator.nextBytes(testBytes); - server = new NioServer("NioTestServer", 0, 1, new NioTestServer(), null); + server = new NioServer("NioTestServer", 0, 1, new NioTestServer(), null, null); try { server.start(); } catch (final NioConnectionException e) { @@ -111,7 +112,7 @@ public class NioTest { maliciousClients.add(maliciousClient); maliciousExecutor.submit(new ThreadedNioClient(maliciousClient)); - final NioClient client = new NioClient("NioTestClient-" + i, "127.0.0.1", server.getPort(), 1, new NioTestClient()); + final NioClient client = new NioClient("NioTestClient-" + i, "127.0.0.1", server.getPort(), 1, null, new NioTestClient()); clients.add(client); clientExecutor.submit(new ThreadedNioClient(client)); } @@ -180,17 +181,17 @@ public class NioTest { public class NioMaliciousClient extends NioClient { public NioMaliciousClient(String name, String host, int port, int workers, HandlerFactory factory) { - super(name, host, port, workers, factory); + super(name, host, port, workers, null, factory); } @Override protected void init() throws IOException { _selector = Selector.open(); try { - _clientConnection = SocketChannel.open(); - logger.info("Connecting to " + _host + ":" + _port); - final InetSocketAddress peerAddr = new InetSocketAddress(_host, _port); - _clientConnection.connect(peerAddr); + clientConnection = SocketChannel.open(); + logger.info("Connecting to 
{}:{}", host, _port); + final InetSocketAddress peerAddr = new InetSocketAddress(host, _port); + clientConnection.connect(peerAddr); // This is done on purpose, the malicious client would connect // to the server and then do nothing, hence using a large sleep value Thread.sleep(Long.MAX_VALUE); diff --git a/utils/src/test/java/org/apache/cloudstack/utils/cache/LazyCacheTest.java b/utils/src/test/java/org/apache/cloudstack/utils/cache/LazyCacheTest.java new file mode 100644 index 00000000000..75d31b95fcc --- /dev/null +++ b/utils/src/test/java/org/apache/cloudstack/utils/cache/LazyCacheTest.java @@ -0,0 +1,115 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.utils.cache; + +import static org.junit.Assert.assertEquals; + +import java.util.function.Function; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class LazyCacheTest { + private final long expireSeconds = 1; + private final String cacheValuePrefix = "ComputedValueFor:"; + private LazyCache cache; + private Function mockLoader; + + @Before + public void setUp() { + mockLoader = Mockito.mock(Function.class); + Mockito.when(mockLoader.apply(Mockito.anyString())).thenAnswer(invocation -> cacheValuePrefix + invocation.getArgument(0)); + cache = new LazyCache<>(4, expireSeconds, mockLoader); + } + + @Test + public void testCacheMissAndLoader() { + String key = "key1"; + String value = cache.get(key); + assertEquals(cacheValuePrefix + key, value); + Mockito.verify(mockLoader).apply(key); + } + + @Test + public void testLoaderNotCalledIfPresent() { + String key = "key2"; + cache.get(key); + try { + Thread.sleep((long)(0.9 * expireSeconds * 1000)); + } catch (InterruptedException ie) { + Assert.fail(String.format("Exception occurred: %s", ie.getMessage())); + } + cache.get(key); + Mockito.verify(mockLoader, Mockito.times(1)).apply(key); + } + + @Test + public void testCacheExpiration() { + String key = "key3"; + cache.get(key); + try { + Thread.sleep((long)(1.1 * expireSeconds * 1000)); + } catch (InterruptedException ie) { + Assert.fail(String.format("Exception occurred: %s", ie.getMessage())); + } + cache.get(key); + Mockito.verify(mockLoader, Mockito.times(2)).apply(key); + } + + @Test + public void testInvalidateKey() { + String key = "key4"; + cache.get(key); + cache.invalidate(key); + cache.get(key); + Mockito.verify(mockLoader, Mockito.times(2)).apply(key); + } + + @Test + public void testClearCache() { + String key1 = "key5"; + String key2 = "key6"; 
+ cache.get(key1); + cache.get(key2); + cache.clear(); + cache.get(key1); + Mockito.verify(mockLoader, Mockito.times(2)).apply(key1); + Mockito.verify(mockLoader, Mockito.times(1)).apply(key2); + } + + @Test + public void testMaximumSize() { + String key = "key7"; + cache.get(key); + for (int i = 0; i < 4; i++) { + cache.get(String.format("newkey-%d", i)); + } + try { + Thread.sleep(100); + } catch (InterruptedException ie) { + Assert.fail(String.format("Exception occurred: %s", ie.getMessage())); + } + cache.get(key); + Mockito.verify(mockLoader, Mockito.times(2)).apply(key); + } +}