diff --git a/.asf.yaml b/.asf.yaml index c052077c753..43f0351bc7c 100644 --- a/.asf.yaml +++ b/.asf.yaml @@ -51,16 +51,13 @@ github: collaborators: - acs-robot - - rajujith - - GaOrtiga - - SadiJr - - winterhazel - gpordeus - hsato03 - bernardodemarco - abh1sar - FelipeM525 - lucas-a-martins + - nicoschmdt protected_branches: ~ diff --git a/.python-version b/.python-version index d70c8f8d89f..c8cfe395918 100644 --- a/.python-version +++ b/.python-version @@ -1 +1 @@ -3.6 +3.10 diff --git a/README.md b/README.md index cc71c848d5d..f66a4dc6f97 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Apache CloudStack [![Build Status](https://github.com/apache/cloudstack/actions/workflows/build.yml/badge.svg?branch=main)](https://github.com/apache/cloudstack/actions/workflows/build.yml) [![UI Build](https://github.com/apache/cloudstack/actions/workflows/ui.yml/badge.svg)](https://github.com/apache/cloudstack/actions/workflows/ui.yml) [![License Check](https://github.com/apache/cloudstack/actions/workflows/rat.yml/badge.svg?branch=main)](https://github.com/apache/cloudstack/actions/workflows/rat.yml) [![Simulator CI](https://github.com/apache/cloudstack/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/apache/cloudstack/actions/workflows/ci.yml) [![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=apache_cloudstack&metric=alert_status)](https://sonarcloud.io/dashboard?id=apache_cloudstack) [![codecov](https://codecov.io/gh/apache/cloudstack/branch/main/graph/badge.svg)](https://codecov.io/gh/apache/cloudstack) -[![Apache CloudStack](tools/logo/acsxmas.jpg)](https://cloudstack.apache.org/) +[![Apache CloudStack](tools/logo/apache_cloudstack.png)](https://cloudstack.apache.org/) Apache CloudStack is open source software designed to deploy and manage large networks of virtual machines, as a highly available, highly scalable diff --git a/agent/conf/agent.properties b/agent/conf/agent.properties index 515614fff16..bff7078fd9f 
100644 --- a/agent/conf/agent.properties +++ b/agent/conf/agent.properties @@ -209,7 +209,7 @@ hypervisor.type=kvm # the management server would send. # In case of arm64 (aarch64), this will change the machine type to 'virt' and # adds a SCSI and a USB controller in the domain xml. -# Possible values: x86_64 | aarch64 +# Possible values: x86_64 | aarch64 | s390x # If null (default), defaults to the VM's OS architecture #guest.cpu.arch= @@ -434,3 +434,10 @@ iscsi.session.cleanup.enabled=false # Implicit host tags managed by agent.properties # host.tags= + +# Timeout(in seconds) for SSL handshake when agent connects to server. When no value is set then default value of 30s +# will be used +#ssl.handshake.timeout= + +# Wait(in seconds) during agent reconnections. When no value is set then default value of 5s will be used +#backoff.seconds= diff --git a/agent/src/main/java/com/cloud/agent/Agent.java b/agent/src/main/java/com/cloud/agent/Agent.java index 97803477115..0a76bfbb4f8 100644 --- a/agent/src/main/java/com/cloud/agent/Agent.java +++ b/agent/src/main/java/com/cloud/agent/Agent.java @@ -31,22 +31,22 @@ import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Timer; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import javax.naming.ConfigurationException; -import com.cloud.agent.api.MigrateAgentConnectionAnswer; -import 
com.cloud.agent.api.MigrateAgentConnectionCommand; -import com.cloud.resource.AgentStatusUpdater; -import com.cloud.resource.ResourceStatusUpdater; -import com.cloud.agent.api.PingAnswer; -import com.cloud.utils.NumbersUtil; import org.apache.cloudstack.agent.lb.SetupMSListAnswer; import org.apache.cloudstack.agent.lb.SetupMSListCommand; import org.apache.cloudstack.ca.PostCertificateRenewalCommand; @@ -58,10 +58,10 @@ import org.apache.cloudstack.managed.context.ManagedContextTimerTask; import org.apache.cloudstack.utils.security.KeyStoreUtils; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.io.FileUtils; -import org.apache.commons.lang.ObjectUtils; -import org.apache.commons.lang3.StringUtils; -import org.apache.logging.log4j.Logger; +import org.apache.commons.lang3.ObjectUtils; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.ThreadContext; import com.cloud.agent.api.AgentControlAnswer; import com.cloud.agent.api.AgentControlCommand; @@ -70,6 +70,9 @@ import com.cloud.agent.api.Command; import com.cloud.agent.api.CronCommand; import com.cloud.agent.api.MaintainAnswer; import com.cloud.agent.api.MaintainCommand; +import com.cloud.agent.api.MigrateAgentConnectionAnswer; +import com.cloud.agent.api.MigrateAgentConnectionCommand; +import com.cloud.agent.api.PingAnswer; import com.cloud.agent.api.PingCommand; import com.cloud.agent.api.ReadyCommand; import com.cloud.agent.api.ShutdownCommand; @@ -79,9 +82,12 @@ import com.cloud.agent.transport.Request; import com.cloud.agent.transport.Response; import com.cloud.exception.AgentControlChannelException; import com.cloud.host.Host; +import com.cloud.resource.AgentStatusUpdater; +import com.cloud.resource.ResourceStatusUpdater; import com.cloud.resource.ServerResource; +import com.cloud.utils.NumbersUtil; import com.cloud.utils.PropertiesUtil; -import com.cloud.utils.backoff.BackoffAlgorithm; +import 
com.cloud.utils.StringUtils; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.NioConnectionException; @@ -93,7 +99,6 @@ import com.cloud.utils.nio.NioConnection; import com.cloud.utils.nio.Task; import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; -import org.apache.logging.log4j.ThreadContext; /** * @config @@ -117,7 +122,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater Configuration(66), // Exiting due to configuration problems. Error(67); // Exiting because of error. - int value; + final int value; ExitStatus(final int value) { this.value = value; @@ -128,133 +133,162 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } } - List _controlListeners = new ArrayList(); + CopyOnWriteArrayList controlListeners = new CopyOnWriteArrayList<>(); - IAgentShell _shell; - NioConnection _connection; - ServerResource _resource; - Link _link; - Long _id; + IAgentShell shell; + NioConnection connection; + ServerResource serverResource; + Link link; + Long id; String _uuid; String _name; - Timer _timer = new Timer("Agent Timer"); - Timer certTimer; - Timer hostLBTimer; + ScheduledExecutorService selfTaskExecutor; + ScheduledExecutorService certExecutor; + ScheduledExecutorService hostLbCheckExecutor; - List _watchList = new ArrayList(); - long _sequence = 0; - long _lastPingResponseTime = 0; - long _pingInterval = 0; - AtomicInteger _inProgress = new AtomicInteger(); + CopyOnWriteArrayList> watchList = new CopyOnWriteArrayList<>(); + AtomicLong sequence = new AtomicLong(0); + AtomicLong lastPingResponseTime = new AtomicLong(0L); + long pingInterval = 0; + AtomicInteger commandsInProgress = new AtomicInteger(0); - StartupTask _startup = null; - long _startupWaitDefault = 180000; - long _startupWait = _startupWaitDefault; - boolean _reconnectAllowed = true; - //For time sentitive task, 
e.g. PingTask - ThreadPoolExecutor _ugentTaskPool; - ExecutorService _executor; + private final AtomicReference startupTask = new AtomicReference<>(); + private static final long DEFAULT_STARTUP_WAIT = 180; + long startupWait = DEFAULT_STARTUP_WAIT; + boolean reconnectAllowed = true; - Thread _shutdownThread = new ShutdownThread(this); + //For time sensitive task, e.g. PingTask + ThreadPoolExecutor outRequestHandler; + ExecutorService requestHandler; - private String _keystoreSetupPath; - private String _keystoreCertImportPath; + Thread shutdownThread = new ShutdownThread(this); - // for simulator use only + private String keystoreSetupSetupPath; + private String keystoreCertImportScriptPath; + + private String hostname; + + protected String getLinkLog(final Link link) { + if (link == null) { + return ""; + } + StringBuilder str = new StringBuilder(); + if (logger.isTraceEnabled()) { + str.append(System.identityHashCode(link)).append("-"); + } + str.append(link.getSocketAddress()); + return str.toString(); + } + + protected String getAgentName() { + return (serverResource != null && serverResource.isAppendAgentNameToLogs() && + StringUtils.isNotBlank(serverResource.getName())) ? + serverResource.getName() : + "Agent"; + } + + protected void setupShutdownHookAndInitExecutors() { + logger.trace("Adding shutdown hook"); + Runtime.getRuntime().addShutdownHook(shutdownThread); + selfTaskExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Agent-SelfTask")); + outRequestHandler = new ThreadPoolExecutor(shell.getPingRetries(), 2 * shell.getPingRetries(), 10, TimeUnit.MINUTES, + new SynchronousQueue<>(), new NamedThreadFactory("AgentOutRequest-Handler")); + requestHandler = new ThreadPoolExecutor(shell.getWorkers(), 5 * shell.getWorkers(), 1, TimeUnit.DAYS, + new LinkedBlockingQueue<>(), new NamedThreadFactory("AgentRequest-Handler")); + } + + /** + * Constructor for the {@code Agent} class, intended for simulator use only. + * + *

This constructor initializes the agent with a provided {@link IAgentShell}. + * It sets up the necessary NIO client connection, establishes a shutdown hook, + * and initializes the thread executors. + * + * @param shell the {@link IAgentShell} instance that provides agent configuration and runtime information. + */ public Agent(final IAgentShell shell) { - _shell = shell; - _link = null; - - _connection = new NioClient("Agent", _shell.getNextHost(), _shell.getPort(), _shell.getWorkers(), this); - - Runtime.getRuntime().addShutdownHook(_shutdownThread); - - _ugentTaskPool = - new ThreadPoolExecutor(shell.getPingRetries(), 2 * shell.getPingRetries(), 10, TimeUnit.MINUTES, new SynchronousQueue(), new NamedThreadFactory( - "UgentTask")); - - _executor = - new ThreadPoolExecutor(_shell.getWorkers(), 5 * _shell.getWorkers(), 1, TimeUnit.DAYS, new LinkedBlockingQueue(), new NamedThreadFactory( - "agentRequest-Handler")); + this.shell = shell; + this.link = null; + this.connection = new NioClient( + getAgentName(), + this.shell.getNextHost(), + this.shell.getPort(), + this.shell.getWorkers(), + this.shell.getSslHandshakeTimeout(), + this + ); + setupShutdownHookAndInitExecutors(); } public Agent(final IAgentShell shell, final int localAgentId, final ServerResource resource) throws ConfigurationException { - _shell = shell; - _resource = resource; - _link = null; - + this.shell = shell; + serverResource = resource; + link = null; resource.setAgentControl(this); - - final String value = _shell.getPersistentProperty(getResourceName(), "id"); - _uuid = _shell.getPersistentProperty(getResourceName(), "uuid"); - _name = _shell.getPersistentProperty(getResourceName(), "name"); - _id = value != null ? 
Long.parseLong(value) : null; - logger.info("Initialising agent [id: {}, uuid: {}, name: {}]", ObjectUtils.defaultIfNull(_id, ""), _uuid, _name); + final String value = shell.getPersistentProperty(getResourceName(), "id"); + _uuid = shell.getPersistentProperty(getResourceName(), "uuid"); + _name = shell.getPersistentProperty(getResourceName(), "name"); + id = value != null ? Long.parseLong(value) : null; + logger.info("Initialising agent [id: {}, uuid: {}, name: {}]", ObjectUtils.defaultIfNull(id, ""), _uuid, _name); final Map params = new HashMap<>(); - // merge with properties from command line to let resource access command line parameters - for (final Map.Entry cmdLineProp : _shell.getCmdLineProperties().entrySet()) { + for (final Map.Entry cmdLineProp : this.shell.getCmdLineProperties().entrySet()) { params.put(cmdLineProp.getKey(), cmdLineProp.getValue()); } - - if (!_resource.configure(getResourceName(), params)) { - throw new ConfigurationException("Unable to configure " + _resource.getName()); + if (!serverResource.configure(getResourceName(), params)) { + throw new ConfigurationException("Unable to configure " + serverResource.getName()); } + ThreadContext.put("agentname", getAgentName()); + final String host = this.shell.getNextHost(); + connection = new NioClient(getAgentName(), host, this.shell.getPort(), this.shell.getWorkers(), + this.shell.getSslHandshakeTimeout(), this); + setupShutdownHookAndInitExecutors(); + logger.info("{} with host = {}, local id = {}", this, host, localAgentId); + } - final String host = _shell.getNextHost(); - _connection = new NioClient("Agent", host, _shell.getPort(), _shell.getWorkers(), this); - // ((NioClient)_connection).setBindAddress(_shell.getPrivateIp()); - - logger.debug("Adding shutdown hook"); - Runtime.getRuntime().addShutdownHook(_shutdownThread); - - _ugentTaskPool = - new ThreadPoolExecutor(shell.getPingRetries(), 2 * shell.getPingRetries(), 10, TimeUnit.MINUTES, new SynchronousQueue(), new 
NamedThreadFactory( - "UgentTask")); - - _executor = - new ThreadPoolExecutor(_shell.getWorkers(), 5 * _shell.getWorkers(), 1, TimeUnit.DAYS, new LinkedBlockingQueue(), new NamedThreadFactory( - "agentRequest-Handler")); - - logger.info("Agent [id = {}, uuid: {}, name: {}] : type = {} : zone = {} : pod = {} : workers = {} : host = {} : port = {}", - ObjectUtils.defaultIfNull(_id, "new"), _uuid, _name, getResourceName(), - _shell.getZone(), _shell.getPod(), _shell.getWorkers(), host, _shell.getPort()); + @Override + public String toString() { + return String.format("Agent [id = %s, uuid = %s, name = %s, type = %s, zone = %s, pod = %s, workers = %d, port = %d]", + ObjectUtils.defaultIfNull(id, "new"), + _uuid, + _name, + getResourceName(), + this.shell.getZone(), + this.shell.getPod(), + this.shell.getWorkers(), + this.shell.getPort()); } public String getVersion() { - return _shell.getVersion(); + return shell.getVersion(); } public String getResourceGuid() { - final String guid = _shell.getGuid(); + final String guid = shell.getGuid(); return guid + "-" + getResourceName(); } public String getZone() { - return _shell.getZone(); + return shell.getZone(); } public String getPod() { - return _shell.getPod(); + return shell.getPod(); } protected void setLink(final Link link) { - _link = link; + this.link = link; } public ServerResource getResource() { - return _resource; - } - - public BackoffAlgorithm getBackoffAlgorithm() { - return _shell.getBackoffAlgorithm(); + return serverResource; } public String getResourceName() { - return _resource.getClass().getSimpleName(); + return serverResource.getClass().getSimpleName(); } /** @@ -263,70 +297,64 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater * agent instances and its inner objects. 
*/ private void scavengeOldAgentObjects() { - _executor.submit(new Runnable() { - @Override - public void run() { - try { - Thread.sleep(2000L); - } catch (final InterruptedException ignored) { - } finally { - System.gc(); - } + requestHandler.submit(() -> { + try { + Thread.sleep(2000L); + } catch (final InterruptedException ignored) { + } finally { + System.gc(); } }); } public void start() { - if (!_resource.start()) { - logger.error("Unable to start the resource: {}", _resource.getName()); - throw new CloudRuntimeException("Unable to start the resource: " + _resource.getName()); + if (!serverResource.start()) { + String msg = String.format("Unable to start the resource: %s", serverResource.getName()); + logger.error(msg); + throw new CloudRuntimeException(msg); } - _keystoreSetupPath = Script.findScript("scripts/util/", KeyStoreUtils.KS_SETUP_SCRIPT); - if (_keystoreSetupPath == null) { + keystoreSetupSetupPath = Script.findScript("scripts/util/", KeyStoreUtils.KS_SETUP_SCRIPT); + if (keystoreSetupSetupPath == null) { throw new CloudRuntimeException(String.format("Unable to find the '%s' script", KeyStoreUtils.KS_SETUP_SCRIPT)); } - _keystoreCertImportPath = Script.findScript("scripts/util/", KeyStoreUtils.KS_IMPORT_SCRIPT); - if (_keystoreCertImportPath == null) { + keystoreCertImportScriptPath = Script.findScript("scripts/util/", KeyStoreUtils.KS_IMPORT_SCRIPT); + if (keystoreCertImportScriptPath == null) { throw new CloudRuntimeException(String.format("Unable to find the '%s' script", KeyStoreUtils.KS_IMPORT_SCRIPT)); } try { - _connection.start(); + connection.start(); } catch (final NioConnectionException e) { logger.warn("Attempt to connect to server generated NIO Connection Exception {}, trying again", e.getLocalizedMessage()); } - while (!_connection.isStartup()) { - final String host = _shell.getNextHost(); - _shell.getBackoffAlgorithm().waitBeforeRetry(); - _connection = new NioClient("Agent", host, _shell.getPort(), _shell.getWorkers(), this); - 
logger.info("Connecting to host:{}", host); + while (!connection.isStartup()) { + final String host = shell.getNextHost(); + shell.getBackoffAlgorithm().waitBeforeRetry(); + connection = new NioClient(getAgentName(), host, shell.getPort(), shell.getWorkers(), + shell.getSslHandshakeTimeout(), this); + logger.info("Connecting to host: {}", host); try { - _connection.start(); + connection.start(); } catch (final NioConnectionException e) { - _connection.stop(); - try { - _connection.cleanUp(); - } catch (final IOException ex) { - logger.warn("Fail to clean up old connection. {}", ex); - } + stopAndCleanupConnection(false); logger.info("Attempted to connect to the server, but received an unexpected exception, trying again...", e); } } - _shell.updateConnectedHost(); + shell.updateConnectedHost(); scavengeOldAgentObjects(); } public void stop(final String reason, final String detail) { - logger.info("Stopping the agent: Reason = {} {}", reason, ": Detail = " + ObjectUtils.defaultIfNull(detail, "")); - _reconnectAllowed = false; - if (_connection != null) { + logger.info("Stopping the agent: Reason = {}{}", reason, (detail != null ? ": Detail = " + detail : "")); + reconnectAllowed = false; + if (connection != null) { final ShutdownCommand cmd = new ShutdownCommand(reason, detail); try { - if (_link != null) { - final Request req = new Request(_id != null ? _id : -1, -1, cmd, false); - _link.send(req.toBytes()); + if (link != null) { + final Request req = new Request(id != null ? 
id : -1, -1, cmd, false); + link.send(req.toBytes()); } } catch (final ClosedChannelException e) { logger.warn("Unable to send: {}", cmd.toString()); @@ -339,53 +367,54 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } catch (final InterruptedException e) { logger.debug("Who the heck interrupted me here?"); } - _connection.stop(); - _connection = null; - _link = null; + connection.stop(); + connection = null; + link = null; } - if (_resource != null) { - _resource.stop(); - _resource = null; + if (serverResource != null) { + serverResource.stop(); + serverResource = null; } - if (_startup != null) { - _startup = null; + if (startupTask.get() != null) { + startupTask.set(null); } - if (_ugentTaskPool != null) { - _ugentTaskPool.shutdownNow(); - _ugentTaskPool = null; + if (outRequestHandler != null) { + outRequestHandler.shutdownNow(); + outRequestHandler = null; } - if (_executor != null) { - _executor.shutdown(); - _executor = null; + if (requestHandler != null) { + requestHandler.shutdown(); + requestHandler = null; } - if (_timer != null) { - _timer.cancel(); - _timer = null; + if (selfTaskExecutor != null) { + selfTaskExecutor.shutdown(); + selfTaskExecutor = null; } - if (hostLBTimer != null) { - hostLBTimer.cancel(); - hostLBTimer = null; + if (hostLbCheckExecutor != null) { + hostLbCheckExecutor.shutdown(); + hostLbCheckExecutor = null; } - if (certTimer != null) { - certTimer.cancel(); - certTimer = null; + if (certExecutor != null) { + certExecutor.shutdown(); + certExecutor = null; } } public Long getId() { - return _id; + return id; } public void setId(final Long id) { - _id = id; - _shell.setPersistentProperty(getResourceName(), "id", Long.toString(id)); + logger.debug("Set agent id {}", id); + this.id = id; + shell.setPersistentProperty(getResourceName(), "id", Long.toString(id)); } public String getUuid() { @@ -394,7 +423,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater public void 
setUuid(String uuid) { this._uuid = uuid; - _shell.setPersistentProperty(getResourceName(), "uuid", uuid); + shell.setPersistentProperty(getResourceName(), "uuid", uuid); } public String getName() { @@ -403,61 +432,75 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater public void setName(String name) { this._name = name; - _shell.setPersistentProperty(getResourceName(), "name", name); + shell.setPersistentProperty(getResourceName(), "name", name); } - private synchronized void scheduleServicesRestartTask() { - if (certTimer != null) { - certTimer.cancel(); - certTimer.purge(); + private void scheduleCertificateRenewalTask() { + String name = "CertificateRenewalTask"; + if (certExecutor != null && !certExecutor.isShutdown()) { + certExecutor.shutdown(); + try { + if (!certExecutor.awaitTermination(1, TimeUnit.SECONDS)) { + certExecutor.shutdownNow(); + } + } catch (InterruptedException e) { + logger.debug("Forcing {} shutdown as it did not shutdown in the desired time due to: {}", + name, e.getMessage()); + certExecutor.shutdownNow(); + } } - certTimer = new Timer("Certificate Renewal Timer"); - certTimer.schedule(new PostCertificateRenewalTask(this), 5000L); + certExecutor = Executors.newSingleThreadScheduledExecutor((new NamedThreadFactory(name))); + certExecutor.schedule(new PostCertificateRenewalTask(this), 5, TimeUnit.SECONDS); } - private synchronized void scheduleHostLBCheckerTask(final long checkInterval) { - if (hostLBTimer != null) { - hostLBTimer.cancel(); + private void scheduleHostLBCheckerTask(final long checkInterval) { + String name = "HostLBCheckerTask"; + if (hostLbCheckExecutor != null && !hostLbCheckExecutor.isShutdown()) { + hostLbCheckExecutor.shutdown(); + try { + if (!hostLbCheckExecutor.awaitTermination(1, TimeUnit.SECONDS)) { + hostLbCheckExecutor.shutdownNow(); + } + } catch (InterruptedException e) { + logger.debug("Forcing {} shutdown as it did not shutdown in the desired time due to: {}", + name, 
e.getMessage()); + hostLbCheckExecutor.shutdownNow(); + } } if (checkInterval > 0L) { - logger.info("Scheduling preferred host timer task with host.lb.interval={}ms", checkInterval); - hostLBTimer = new Timer("Host LB Timer"); - hostLBTimer.scheduleAtFixedRate(new PreferredHostCheckerTask(), checkInterval, checkInterval); + logger.info("Scheduling preferred host task with host.lb.interval={}ms", checkInterval); + hostLbCheckExecutor = Executors.newSingleThreadScheduledExecutor((new NamedThreadFactory(name))); + hostLbCheckExecutor.scheduleAtFixedRate(new PreferredHostCheckerTask(), checkInterval, checkInterval, + TimeUnit.MILLISECONDS); } } public void scheduleWatch(final Link link, final Request request, final long delay, final long period) { - synchronized (_watchList) { - logger.debug("Adding task with request: {} to watch list", request.toString()); - - final WatchTask task = new WatchTask(link, request, this); - _timer.schedule(task, 0, period); - _watchList.add(task); - } + logger.debug("Adding a watch list"); + final WatchTask task = new WatchTask(link, request, this); + final ScheduledFuture future = selfTaskExecutor.scheduleAtFixedRate(task, delay, period, TimeUnit.MILLISECONDS); + watchList.add(future); } public void triggerUpdate() { - PingCommand command = _resource.getCurrentStatus(getId()); + PingCommand command = serverResource.getCurrentStatus(getId()); command.setOutOfBand(true); logger.debug("Sending out of band ping"); - - final Request request = new Request(_id, -1, command, false); + final Request request = new Request(id, -1, command, false); request.setSequence(getNextSequence()); try { - _link.send(request.toBytes()); + link.send(request.toBytes()); } catch (final ClosedChannelException e) { logger.warn("Unable to send ping update: {}", request.toString()); } } protected void cancelTasks() { - synchronized (_watchList) { - for (final WatchTask task : _watchList) { - task.cancel(); - } - logger.debug("Clearing {} tasks of watch list", 
_watchList.size()); - _watchList.clear(); + for (final ScheduledFuture task : watchList) { + task.cancel(true); } + logger.debug("Clearing watch list: {}", () -> watchList.size()); + watchList.clear(); } /** @@ -468,14 +511,34 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater * when host is added back */ protected void cleanupAgentZoneProperties() { - _shell.setPersistentProperty(null, "zone", ""); - _shell.setPersistentProperty(null, "cluster", ""); - _shell.setPersistentProperty(null, "pod", ""); + shell.setPersistentProperty(null, "zone", ""); + shell.setPersistentProperty(null, "cluster", ""); + shell.setPersistentProperty(null, "pod", ""); } - public synchronized void lockStartupTask(final Link link) { - _startup = new StartupTask(link); - _timer.schedule(_startup, _startupWait); + public void lockStartupTask(final Link link) { + logger.debug("Creating startup task for link: {}", () -> getLinkLog(link)); + StartupTask currentTask = startupTask.get(); + if (currentTask != null) { + logger.warn("A Startup task is already locked or in progress, cannot create for link {}", + getLinkLog(link)); + return; + } + currentTask = new StartupTask(link); + if (startupTask.compareAndSet(null, currentTask)) { + selfTaskExecutor.schedule(currentTask, startupWait, TimeUnit.SECONDS); + return; + } + logger.warn("Failed to lock a StartupTask for link: {}", getLinkLog(link)); + } + + protected boolean cancelStartupTask() { + StartupTask task = startupTask.getAndSet(null); + if (task != null) { + task.cancel(); + return true; + } + return false; } public void sendStartup(final Link link) { @@ -483,9 +546,9 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } public void sendStartup(final Link link, boolean transfer) { - final StartupCommand[] startup = _resource.initialize(); + final StartupCommand[] startup = serverResource.initialize(); if (startup != null) { - final String msHostList = 
_shell.getPersistentProperty(null, "host"); + final String msHostList = shell.getPersistentProperty(null, "host"); final Command[] commands = new Command[startup.length]; for (int i = 0; i < startup.length; i++) { setupStartupCommand(startup[i]); @@ -493,7 +556,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater startup[i].setConnectionTransferred(transfer); commands[i] = startup[i]; } - final Request request = new Request(_id != null ? _id : -1, -1, commands, false, false); + final Request request = new Request(id != null ? id : -1, -1, commands, false, false); request.setSequence(getNextSequence()); logger.debug("Sending Startup: {}", request.toString()); @@ -501,31 +564,37 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater try { link.send(request.toBytes()); } catch (final ClosedChannelException e) { - logger.warn("Unable to send request: {}", request.toString()); + logger.warn("Unable to send request to {} due to '{}', request: {}", + getLinkLog(link), e.getMessage(), request); } - if (_resource instanceof ResourceStatusUpdater) { - ((ResourceStatusUpdater) _resource).registerStatusUpdater(this); + if (serverResource instanceof ResourceStatusUpdater) { + ((ResourceStatusUpdater) serverResource).registerStatusUpdater(this); } } } - protected void setupStartupCommand(final StartupCommand startup) { - InetAddress addr; + protected String retrieveHostname() { + logger.trace("Retrieving hostname with resource={}", () -> serverResource.getClass().getSimpleName()); + final String result = Script.runSimpleBashScript(Script.getExecutableAbsolutePath("hostname"), 500); + if (StringUtils.isNotBlank(result)) { + return result; + } try { - addr = InetAddress.getLocalHost(); + InetAddress address = InetAddress.getLocalHost(); + return address.toString(); } catch (final UnknownHostException e) { logger.warn("unknown host? 
", e); throw new CloudRuntimeException("Cannot get local IP address"); } + } - final Script command = new Script("hostname", 500, logger); - final OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); - final String result = command.execute(parser); - final String hostname = result == null ? parser.getLine() : addr.toString(); - + protected void setupStartupCommand(final StartupCommand startup) { startup.setId(getId()); - if (startup.getName() == null) { + if (StringUtils.isBlank(startup.getName())) { + if (StringUtils.isBlank(hostname)) { + hostname = retrieveHostname(); + } startup.setName(hostname); } startup.setDataCenter(getZone()); @@ -552,85 +621,78 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } protected void reconnect(final Link link, String preferredHost, List avoidHostList, boolean forTransfer) { - if (!(forTransfer || _reconnectAllowed)) { + if (!(forTransfer || reconnectAllowed)) { return; } - synchronized (this) { - if (_startup != null) { - _startup.cancel(); - _startup = null; - } + if (!reconnectAllowed) { + logger.debug("Reconnect requested but it is not allowed {}", () -> getLinkLog(link)); + return; } - - if (link != null) { - link.close(); - link.terminated(); - } - + cancelStartupTask(); + closeAndTerminateLink(link); + closeAndTerminateLink(this.link); setLink(null); cancelTasks(); + serverResource.disconnected(); + logger.info("Lost connection to host: {}. 
Attempting reconnection while we still have {} commands in progress.", shell.getConnectedHost(), commandsInProgress.get()); + stopAndCleanupConnection(true); + do { + final String host = shell.getNextHost(); + connection = new NioClient(getAgentName(), host, shell.getPort(), shell.getWorkers(), shell.getSslHandshakeTimeout(), this); + logger.info("Reconnecting to host: {}", host); + try { + connection.start(); + } catch (final NioConnectionException e) { + logger.info("Attempted to re-connect to the server, but received an unexpected exception, trying again...", e); + stopAndCleanupConnection(false); + } + shell.getBackoffAlgorithm().waitBeforeRetry(); + } while (!connection.isStartup()); + shell.updateConnectedHost(); + logger.info("Connected to the host: {}", shell.getConnectedHost()); + } - _resource.disconnected(); - - logger.info("Lost connection to host: {}. Attempting reconnection while we still have {} commands in progress.", _shell.getConnectedHost(), _inProgress.get()); - - _connection.stop(); + protected void closeAndTerminateLink(final Link link) { + if (link == null) { + return; + } + link.close(); + link.terminated(); + } + protected void stopAndCleanupConnection(boolean waitForStop) { + if (connection == null) { + return; + } + connection.stop(); try { - _connection.cleanUp(); + connection.cleanUp(); } catch (final IOException e) { logger.warn("Fail to clean up old connection. 
{}", e); } - - while (_connection.isStartup()) { - _shell.getBackoffAlgorithm().waitBeforeRetry(); + if (!waitForStop) { + return; } - - String host = preferredHost; - if (StringUtils.isEmpty(host)) { - host = _shell.getNextHost(); - } - do { - if (CollectionUtils.isEmpty(avoidHostList) || !avoidHostList.contains(host)) { - _connection = new NioClient("Agent", host, _shell.getPort(), _shell.getWorkers(), this); - logger.info("Reconnecting to host:{}", host); - try { - _connection.start(); - } catch (final NioConnectionException e) { - logger.info("Attempted to re-connect to the server, but received an unexpected exception, trying again...", e); - _connection.stop(); - try { - _connection.cleanUp(); - } catch (final IOException ex) { - logger.warn("Fail to clean up old connection. {}", ex); - } - } - } - _shell.getBackoffAlgorithm().waitBeforeRetry(); - host = _shell.getNextHost(); - } while (!_connection.isStartup()); - _shell.updateConnectedHost(); - logger.info("Connected to the host: {}", _shell.getConnectedHost()); + shell.getBackoffAlgorithm().waitBeforeRetry(); + } while (connection.isStartup()); } public void processStartupAnswer(final Answer answer, final Response response, final Link link) { - boolean cancelled = false; - synchronized (this) { - if (_startup != null) { - _startup.cancel(); - _startup = null; - } else { - cancelled = true; - } - } + boolean answerValid = cancelStartupTask(); final StartupAnswer startup = (StartupAnswer)answer; if (!startup.getResult()) { logger.error("Not allowed to connect to the server: {}", answer.getDetails()); + if (serverResource != null && !serverResource.isExitOnFailures()) { + logger.trace("{} does not allow exit on failure, reconnecting", + serverResource.getClass().getSimpleName()); + reconnect(link); + return; + } System.exit(1); } - if (cancelled) { + if (!answerValid) { logger.warn("Threw away a startup answer because we're reconnecting."); return; } @@ -641,12 +703,12 @@ public class Agent implements 
HandlerFactory, IAgentControl, AgentStatusUpdater setId(startup.getHostId()); setUuid(startup.getHostUuid()); setName(startup.getHostName()); - _pingInterval = (long)startup.getPingInterval() * 1000; // change to ms. + pingInterval = startup.getPingInterval() * 1000L; // change to ms. - setLastPingResponseTime(); - scheduleWatch(link, response, _pingInterval, _pingInterval); + updateLastPingResponseTime(); + scheduleWatch(link, response, pingInterval, pingInterval); - _ugentTaskPool.setKeepAliveTime(2 * _pingInterval, TimeUnit.MILLISECONDS); + outRequestHandler.setKeepAliveTime(2 * pingInterval, TimeUnit.MILLISECONDS); logger.info("Startup Response Received: agent [id: {}, uuid: {}, name: {}]", startup.getHostId(), startup.getHostUuid(), startup.getHostName()); @@ -680,7 +742,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater if (cmd instanceof CronCommand) { final CronCommand watch = (CronCommand)cmd; - scheduleWatch(link, request, (long)watch.getInterval() * 1000, watch.getInterval() * 1000); + scheduleWatch(link, request, watch.getInterval() * 1000L, watch.getInterval() * 1000L); answer = new Answer(cmd, true, null); } else if (cmd instanceof ShutdownCommand) { final ShutdownCommand shutdown = (ShutdownCommand)cmd; @@ -689,10 +751,17 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater if (shutdown.isRemoveHost()) { cleanupAgentZoneProperties(); } - _reconnectAllowed = false; + reconnectAllowed = false; answer = new Answer(cmd, true, null); } else if (cmd instanceof ReadyCommand && ((ReadyCommand)cmd).getDetails() != null) { + logger.debug("Not ready to connect to mgt server: {}", ((ReadyCommand)cmd).getDetails()); + if (serverResource != null && !serverResource.isExitOnFailures()) { + logger.trace("{} does not allow exit on failure, reconnecting", + serverResource.getClass().getSimpleName()); + reconnect(link); + return; + } System.exit(1); return; } else if (cmd instanceof MaintainCommand) { @@ 
-700,12 +769,10 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater answer = new MaintainAnswer((MaintainCommand)cmd); } else if (cmd instanceof AgentControlCommand) { answer = null; - synchronized (_controlListeners) { - for (final IAgentControlListener listener : _controlListeners) { - answer = listener.processControlRequest(request, (AgentControlCommand)cmd); - if (answer != null) { - break; - } + for (final IAgentControlListener listener : controlListeners) { + answer = listener.processControlRequest(request, (AgentControlCommand)cmd); + if (answer != null) { + break; } } @@ -717,8 +784,8 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater answer = setupAgentKeystore((SetupKeyStoreCommand) cmd); } else if (cmd instanceof SetupCertificateCommand && ((SetupCertificateCommand) cmd).isHandleByAgent()) { answer = setupAgentCertificate((SetupCertificateCommand) cmd); - if (Host.Type.Routing.equals(_resource.getType())) { - scheduleServicesRestartTask(); + if (Host.Type.Routing.equals(serverResource.getType())) { + scheduleCertificateRenewalTask(); } } else if (cmd instanceof SetupMSListCommand) { answer = setupManagementServerList((SetupMSListCommand) cmd); @@ -728,11 +795,11 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater if (cmd instanceof ReadyCommand) { processReadyCommand(cmd); } - _inProgress.incrementAndGet(); + commandsInProgress.incrementAndGet(); try { - answer = _resource.executeRequest(cmd); + answer = serverResource.executeRequest(cmd); } finally { - _inProgress.decrementAndGet(); + commandsInProgress.decrementAndGet(); } if (answer == null) { logger.debug("Response: unsupported command {}", cmd.toString()); @@ -786,13 +853,13 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater final String keyStoreFile = agentFile.getParent() + "/" + KeyStoreUtils.KS_FILENAME; final String csrFile = agentFile.getParent() + "/" + 
KeyStoreUtils.CSR_FILENAME; - String storedPassword = _shell.getPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY); + String storedPassword = shell.getPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY); if (StringUtils.isEmpty(storedPassword)) { storedPassword = keyStorePassword; - _shell.setPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY, storedPassword); + shell.setPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY, storedPassword); } - Script script = new Script(_keystoreSetupPath, 300000, logger); + Script script = new Script(keystoreSetupSetupPath, 300000, logger); script.add(agentFile.getAbsolutePath()); script.add(keyStoreFile); script.add(storedPassword); @@ -836,8 +903,8 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater throw new CloudRuntimeException("Unable to save received agent client and ca certificates", e); } - String ksPassphrase = _shell.getPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY); - Script script = new Script(_keystoreCertImportPath, 300000, logger); + String ksPassphrase = shell.getPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY); + Script script = new Script(keystoreCertImportScriptPath, 300000, logger); script.add(agentFile.getAbsolutePath()); script.add(ksPassphrase); script.add(keyStoreFile); @@ -859,9 +926,9 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater if (CollectionUtils.isNotEmpty(msList) && StringUtils.isNotEmpty(lbAlgorithm)) { try { final String newMSHosts = String.format("%s%s%s", com.cloud.utils.StringUtils.toCSVList(msList), IAgentShell.hostLbAlgorithmSeparator, lbAlgorithm); - _shell.setPersistentProperty(null, "host", newMSHosts); - _shell.setHosts(newMSHosts); - _shell.resetHostCounter(); + shell.setPersistentProperty(null, "host", newMSHosts); + shell.setHosts(newMSHosts); + shell.resetHostCounter(); logger.info("Processed new management server list: {}", newMSHosts); } catch 
(final Exception e) { throw new CloudRuntimeException("Could not persist received management servers list", e); @@ -870,7 +937,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater if ("shuffle".equals(lbAlgorithm)) { scheduleHostLBCheckerTask(0); } else { - scheduleHostLBCheckerTask(_shell.getLbCheckerInterval(lbCheckInterval)); + scheduleHostLBCheckerTask(shell.getLbCheckerInterval(lbCheckInterval)); } } @@ -894,7 +961,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } private void migrateAgentConnection(List avoidMsList) { - final String[] msHosts = _shell.getHosts(); + final String[] msHosts = shell.getHosts(); if (msHosts == null || msHosts.length < 1) { throw new CloudRuntimeException("Management Server hosts empty, not properly configured in agent"); } @@ -908,7 +975,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater String preferredHost = null; for (String msHost : msHostsList) { try (final Socket socket = new Socket()) { - socket.connect(new InetSocketAddress(msHost, _shell.getPort()), 5000); + socket.connect(new InetSocketAddress(msHost, shell.getPort()), 5000); preferredHost = msHost; break; } catch (final IOException e) { @@ -921,9 +988,9 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } logger.debug("Management server host " + preferredHost + " is found to be reachable, trying to reconnect"); - _shell.resetHostCounter(); - _shell.setConnectionTransfer(true); - reconnect(_link, preferredHost, avoidMsList, true); + shell.resetHostCounter(); + shell.setConnectionTransfer(true); + reconnect(link, preferredHost, avoidMsList, true); } public void processResponse(final Response response, final Link link) { @@ -933,16 +1000,14 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater processStartupAnswer(answer, response, link); } else if (answer instanceof AgentControlAnswer) { // Notice, we are doing 
callback while holding a lock! - synchronized (_controlListeners) { - for (final IAgentControlListener listener : _controlListeners) { - listener.processControlResponse(response, (AgentControlAnswer)answer); - } + for (final IAgentControlListener listener : controlListeners) { + listener.processControlResponse(response, (AgentControlAnswer)answer); } - } else if (answer instanceof PingAnswer && (((PingAnswer) answer).isSendStartup()) && _reconnectAllowed) { + } else if (answer instanceof PingAnswer && (((PingAnswer) answer).isSendStartup()) && reconnectAllowed) { logger.info("Management server requested startup command to reinitialize the agent"); sendStartup(link); } else { - setLastPingResponseTime(); + updateLastPingResponseTime(); } } @@ -979,22 +1044,24 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater public void processOtherTask(final Task task) { final Object obj = task.get(); if (obj instanceof Response) { - if (System.currentTimeMillis() - _lastPingResponseTime > _pingInterval * _shell.getPingRetries()) { - logger.error("Ping Interval has gone past {}. Won't reconnect to mgt server, as connection is still alive", _pingInterval * _shell.getPingRetries()); + if (System.currentTimeMillis() - lastPingResponseTime.get() > pingInterval * shell.getPingRetries()) { + logger.error("Ping Interval has gone past {}. 
Won't reconnect to mgt server, as connection is still alive", + pingInterval * shell.getPingRetries()); return; } - final PingCommand ping = _resource.getCurrentStatus(getId()); - final Request request = new Request(_id, -1, ping, false); + final PingCommand ping = serverResource.getCurrentStatus(getId()); + final Request request = new Request(id, -1, ping, false); request.setSequence(getNextSequence()); logger.debug("Sending ping: {}", request.toString()); try { task.getLink().send(request.toBytes()); //if i can send pingcommand out, means the link is ok - setLastPingResponseTime(); + updateLastPingResponseTime(); } catch (final ClosedChannelException e) { - logger.warn("Unable to send request: {}", request.toString()); + logger.warn("Unable to send request to {} due to '{}', request: {}", + getLinkLog(task.getLink()), e.getMessage(), request); } } else if (obj instanceof Request) { @@ -1004,11 +1071,11 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater ThreadContext.put("logcontextid", command.getContextParam("logid")); } Answer answer = null; - _inProgress.incrementAndGet(); + commandsInProgress.incrementAndGet(); try { - answer = _resource.executeRequest(command); + answer = serverResource.executeRequest(command); } finally { - _inProgress.decrementAndGet(); + commandsInProgress.decrementAndGet(); } if (answer != null) { final Response response = new Response(req, answer); @@ -1025,35 +1092,29 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } } - public synchronized void setLastPingResponseTime() { - _lastPingResponseTime = System.currentTimeMillis(); + public void updateLastPingResponseTime() { + lastPingResponseTime.set(System.currentTimeMillis()); } - protected synchronized long getNextSequence() { - return _sequence++; + protected long getNextSequence() { + return sequence.getAndIncrement(); } @Override public void registerControlListener(final IAgentControlListener listener) { - 
synchronized (_controlListeners) { - _controlListeners.add(listener); - } + controlListeners.add(listener); } @Override public void unregisterControlListener(final IAgentControlListener listener) { - synchronized (_controlListeners) { - _controlListeners.remove(listener); - } + controlListeners.remove(listener); } @Override public AgentControlAnswer sendRequest(final AgentControlCommand cmd, final int timeoutInMilliseconds) throws AgentControlChannelException { final Request request = new Request(getId(), -1, new Command[] {cmd}, true, false); request.setSequence(getNextSequence()); - final AgentControlListener listener = new AgentControlListener(request); - registerControlListener(listener); try { postRequest(request); @@ -1064,7 +1125,6 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater logger.warn("sendRequest is interrupted, exit waiting"); } } - return listener.getAnswer(); } finally { unregisterControlListener(listener); @@ -1079,9 +1139,9 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } private void postRequest(final Request request) throws AgentControlChannelException { - if (_link != null) { + if (link != null) { try { - _link.send(request.toBytes()); + link.send(request.toBytes()); } catch (final ClosedChannelException e) { logger.warn("Unable to post agent control request: {}", request.toString()); throw new AgentControlChannelException("Unable to post agent control request due to " + e.getMessage()); @@ -1133,26 +1193,26 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } } - public class WatchTask extends ManagedContextTimerTask { + public class WatchTask implements Runnable { protected Request _request; protected Agent _agent; - protected Link _link; + protected Link link; public WatchTask(final Link link, final Request request, final Agent agent) { super(); _request = request; - _link = link; + this.link = link; _agent = agent; } @Override - protected 
void runInContext() { + public void run() { logger.trace("Scheduling {}", (_request instanceof Response ? "Ping" : "Watch Task")); try { if (_request instanceof Response) { - _ugentTaskPool.submit(new ServerHandler(Task.Type.OTHER, _link, _request)); + outRequestHandler.submit(new ServerHandler(Task.Type.OTHER, link, _request)); } else { - _link.schedule(new ServerHandler(Task.Type.OTHER, _link, _request)); + link.schedule(new ServerHandler(Task.Type.OTHER, link, _request)); } } catch (final ClosedChannelException e) { logger.warn("Unable to schedule task because channel is closed"); @@ -1160,35 +1220,32 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } } - public class StartupTask extends ManagedContextTimerTask { - protected Link _link; - protected volatile boolean cancelled = false; + public class StartupTask implements Runnable { + protected Link link; + private final AtomicBoolean cancelled = new AtomicBoolean(false); public StartupTask(final Link link) { logger.debug("Startup task created"); - _link = link; + this.link = link; } - @Override - public synchronized boolean cancel() { + public boolean cancel() { // TimerTask.cancel may fail depends on the calling context - if (!cancelled) { - cancelled = true; - _startupWait = _startupWaitDefault; + if (cancelled.compareAndSet(false, true)) { + startupWait = DEFAULT_STARTUP_WAIT; logger.debug("Startup task cancelled"); - return super.cancel(); } return true; } @Override - protected synchronized void runInContext() { - if (!cancelled) { - logger.info("The startup command is now cancelled"); - cancelled = true; - _startup = null; - _startupWait = _startupWaitDefault * 2; - reconnect(_link); + public void run() { + if (cancelled.compareAndSet(false, true)) { + logger.info("The running startup command is now invalid. 
Attempting reconnect"); + startupTask.set(null); + startupWait = DEFAULT_STARTUP_WAIT * 2; + logger.debug("Executing reconnect from task - {}", () -> getLinkLog(link)); + reconnect(link); } } } @@ -1219,10 +1276,10 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater @Override public void doTask(final Task task) throws TaskExecutionException { if (task.getType() == Task.Type.CONNECT) { - _shell.getBackoffAlgorithm().reset(); + shell.getBackoffAlgorithm().reset(); setLink(task.getLink()); - sendStartup(task.getLink(), _shell.isConnectionTransfer()); - _shell.setConnectionTransfer(false); + sendStartup(task.getLink(), shell.isConnectionTransfer()); + shell.setConnectionTransfer(false); } else if (task.getType() == Task.Type.DATA) { Request request; try { @@ -1233,7 +1290,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } else { //put the requests from mgt server into another thread pool, as the request may take a longer time to finish. 
Don't block the NIO main thread pool //processRequest(request, task.getLink()); - _executor.submit(new AgentRequestHandler(getType(), getLink(), request)); + requestHandler.submit(new AgentRequestHandler(getType(), getLink(), request)); } } catch (final ClassNotFoundException e) { logger.error("Unable to find this request "); @@ -1247,9 +1304,9 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater Thread.sleep(5000); } catch (InterruptedException e) { } - _shell.setConnectionTransfer(false); + shell.setConnectionTransfer(false); + logger.debug("Executing disconnect task - {}", () -> getLinkLog(task.getLink())); reconnect(task.getLink()); - return; } else if (task.getType() == Task.Type.OTHER) { processOtherTask(task); } @@ -1272,26 +1329,26 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater protected void runInContext() { while (true) { try { - if (_inProgress.get() == 0) { + if (commandsInProgress.get() == 0) { logger.debug("Running post certificate renewal task to restart services."); // Let the resource perform any post certificate renewal cleanups - _resource.executeRequest(new PostCertificateRenewalCommand()); + serverResource.executeRequest(new PostCertificateRenewalCommand()); - IAgentShell shell = agent._shell; - ServerResource resource = agent._resource.getClass().newInstance(); + IAgentShell shell = agent.shell; + ServerResource resource = agent.serverResource.getClass().getDeclaredConstructor().newInstance(); // Stop current agent agent.cancelTasks(); - agent._reconnectAllowed = false; - Runtime.getRuntime().removeShutdownHook(agent._shutdownThread); + agent.reconnectAllowed = false; + Runtime.getRuntime().removeShutdownHook(agent.shutdownThread); agent.stop(ShutdownCommand.Requested, "Restarting due to new X509 certificates"); // Nullify references for GC - agent._shell = null; - agent._watchList = null; - agent._shutdownThread = null; - agent._controlListeners = null; + agent.shell = null; + 
agent.watchList = null; + agent.shutdownThread = null; + agent.controlListeners = null; agent = null; // Start a new agent instance @@ -1299,7 +1356,6 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater return; } logger.debug("Other tasks are in progress, will retry post certificate renewal command after few seconds"); - Thread.sleep(5000); } catch (final Exception e) { logger.warn("Failed to execute post certificate renewal command:", e); @@ -1314,35 +1370,34 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater @Override protected void runInContext() { try { - final String[] msList = _shell.getHosts(); + final String[] msList = shell.getHosts(); if (msList == null || msList.length < 1) { return; } final String preferredHost = msList[0]; - final String connectedHost = _shell.getConnectedHost(); - logger.trace("Running preferred host checker task, connected host={}, preferred host={}", connectedHost, preferredHost); - - if (preferredHost != null && !preferredHost.equals(connectedHost) && _link != null) { - boolean isHostUp = true; - try (final Socket socket = new Socket()) { - socket.connect(new InetSocketAddress(preferredHost, _shell.getPort()), 5000); - } catch (final IOException e) { - isHostUp = false; - logger.trace("Host: {} is not reachable", preferredHost); - - } - if (isHostUp && _link != null && _inProgress.get() == 0) { + final String connectedHost = shell.getConnectedHost(); + logger.debug("Running preferred host checker task, connected host={}, preferred host={}", + connectedHost, preferredHost); + if (preferredHost == null || preferredHost.equals(connectedHost) || link == null) { + return; + } + boolean isHostUp = false; + try (final Socket socket = new Socket()) { + socket.connect(new InetSocketAddress(preferredHost, shell.getPort()), 5000); + isHostUp = true; + } catch (final IOException e) { + logger.debug("Host: {} is not reachable", preferredHost); + } + if (isHostUp && link != null && 
commandsInProgress.get() == 0) { + if (logger.isDebugEnabled()) { logger.debug("Preferred host {} is found to be reachable, trying to reconnect", preferredHost); - - _shell.resetHostCounter(); - reconnect(_link); } + shell.resetHostCounter(); + reconnect(link); } } catch (Throwable t) { logger.error("Error caught while attempting to connect to preferred host", t); } } - } - } diff --git a/agent/src/main/java/com/cloud/agent/AgentShell.java b/agent/src/main/java/com/cloud/agent/AgentShell.java index d76e5551b45..aea7fd3a8de 100644 --- a/agent/src/main/java/com/cloud/agent/AgentShell.java +++ b/agent/src/main/java/com/cloud/agent/AgentShell.java @@ -16,29 +16,6 @@ // under the License. package com.cloud.agent; -import com.cloud.agent.Agent.ExitStatus; -import com.cloud.agent.dao.StorageComponent; -import com.cloud.agent.dao.impl.PropertiesStorage; -import com.cloud.agent.properties.AgentProperties; -import com.cloud.agent.properties.AgentPropertiesFileHandler; -import com.cloud.resource.ServerResource; -import com.cloud.utils.LogUtils; -import com.cloud.utils.ProcessUtil; -import com.cloud.utils.PropertiesUtil; -import com.cloud.utils.backoff.BackoffAlgorithm; -import com.cloud.utils.backoff.impl.ConstantTimeBackoff; -import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.commons.daemon.Daemon; -import org.apache.commons.daemon.DaemonContext; -import org.apache.commons.daemon.DaemonInitException; -import org.apache.commons.lang.math.NumberUtils; -import org.apache.commons.lang3.BooleanUtils; -import org.apache.commons.lang3.StringUtils; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.core.config.Configurator; - -import javax.naming.ConfigurationException; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; @@ -53,6 +30,31 @@ import java.util.Map; import java.util.Properties; import java.util.UUID; +import javax.naming.ConfigurationException; + 
+import org.apache.commons.daemon.Daemon; +import org.apache.commons.daemon.DaemonContext; +import org.apache.commons.daemon.DaemonInitException; +import org.apache.commons.lang.math.NumberUtils; +import org.apache.commons.lang3.BooleanUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.config.Configurator; + +import com.cloud.agent.Agent.ExitStatus; +import com.cloud.agent.dao.StorageComponent; +import com.cloud.agent.dao.impl.PropertiesStorage; +import com.cloud.agent.properties.AgentProperties; +import com.cloud.agent.properties.AgentPropertiesFileHandler; +import com.cloud.resource.ServerResource; +import com.cloud.utils.LogUtils; +import com.cloud.utils.ProcessUtil; +import com.cloud.utils.PropertiesUtil; +import com.cloud.utils.backoff.BackoffAlgorithm; +import com.cloud.utils.backoff.impl.ConstantTimeBackoff; +import com.cloud.utils.exception.CloudRuntimeException; + public class AgentShell implements IAgentShell, Daemon { protected static Logger LOGGER = LogManager.getLogger(AgentShell.class); @@ -415,7 +417,9 @@ public class AgentShell implements IAgentShell, Daemon { LOGGER.info("Defaulting to the constant time backoff algorithm"); _backoff = new ConstantTimeBackoff(); - _backoff.configure("ConstantTimeBackoff", new HashMap()); + Map map = new HashMap<>(); + map.put("seconds", _properties.getProperty("backoff.seconds")); + _backoff.configure("ConstantTimeBackoff", map); } private void launchAgent() throws ConfigurationException { @@ -464,6 +468,11 @@ public class AgentShell implements IAgentShell, Daemon { agent.start(); } + @Override + public Integer getSslHandshakeTimeout() { + return AgentPropertiesFileHandler.getPropertyValue(AgentProperties.SSL_HANDSHAKE_TIMEOUT); + } + public synchronized int getNextAgentId() { return _nextAgentId++; } diff --git a/agent/src/main/java/com/cloud/agent/IAgentShell.java 
b/agent/src/main/java/com/cloud/agent/IAgentShell.java index 0b9d9e81e95..c0ecd90ae69 100644 --- a/agent/src/main/java/com/cloud/agent/IAgentShell.java +++ b/agent/src/main/java/com/cloud/agent/IAgentShell.java @@ -74,4 +74,6 @@ public interface IAgentShell { boolean isConnectionTransfer(); void setConnectionTransfer(boolean connectionTransfer); + + Integer getSslHandshakeTimeout(); } diff --git a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java index 52679811f7c..61cd27fff77 100644 --- a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java +++ b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java @@ -383,7 +383,7 @@ public class AgentProperties{ /** * This param will set the CPU architecture for the domain to override what the management server would send.
* In case of arm64 (aarch64), this will change the machine type to 'virt' and add a SCSI and a USB controller in the domain XML.
- * Possible values: x86_64 | aarch64
+ * Possible values: x86_64 | aarch64 | s390x
* Data type: String.
* Default value: null (will set use the architecture of the VM's OS). */ @@ -811,6 +811,13 @@ public class AgentProperties{ */ public static final Property HOST_TAGS = new Property<>("host.tags", null, String.class); + /** + * Timeout for SSL handshake in seconds + * Data type: Integer.
+ * Default value: null + */ + public static final Property SSL_HANDSHAKE_TIMEOUT = new Property<>("ssl.handshake.timeout", null, Integer.class); + public static class Property { private String name; private T defaultValue; diff --git a/agent/src/test/java/com/cloud/agent/AgentShellTest.java b/agent/src/test/java/com/cloud/agent/AgentShellTest.java index 4126692546f..6d9758cc3dc 100644 --- a/agent/src/test/java/com/cloud/agent/AgentShellTest.java +++ b/agent/src/test/java/com/cloud/agent/AgentShellTest.java @@ -362,4 +362,11 @@ public class AgentShellTest { Assert.assertEquals(expected, shell.getConnectedHost()); } + + @Test + public void testGetSslHandshakeTimeout() { + Integer expected = 1; + agentPropertiesFileHandlerMocked.when(() -> AgentPropertiesFileHandler.getPropertyValue(Mockito.eq(AgentProperties.SSL_HANDSHAKE_TIMEOUT))).thenReturn(expected); + Assert.assertEquals(expected, agentShellSpy.getSslHandshakeTimeout()); + } } diff --git a/agent/src/test/java/com/cloud/agent/AgentTest.java b/agent/src/test/java/com/cloud/agent/AgentTest.java new file mode 100644 index 00000000000..65dc030ebd7 --- /dev/null +++ b/agent/src/test/java/com/cloud/agent/AgentTest.java @@ -0,0 +1,257 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.agent; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.net.InetSocketAddress; + +import javax.naming.ConfigurationException; + +import org.apache.logging.log4j.Logger; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.resource.ServerResource; +import com.cloud.utils.backoff.impl.ConstantTimeBackoff; +import com.cloud.utils.nio.Link; +import com.cloud.utils.nio.NioConnection; + +@RunWith(MockitoJUnitRunner.class) +public class AgentTest { + Agent agent; + private AgentShell shell; + private ServerResource serverResource; + private Logger logger; + + @Before + public void setUp() throws ConfigurationException { + shell = mock(AgentShell.class); + serverResource = mock(ServerResource.class); + doReturn(true).when(serverResource).configure(any(), any()); + doReturn(1).when(shell).getWorkers(); + doReturn(1).when(shell).getPingRetries(); + agent = new Agent(shell, 1, serverResource); + logger = mock(Logger.class); + ReflectionTestUtils.setField(agent, "logger", logger); + } + + @Test + public void testGetLinkLogNullLinkReturnsEmptyString() { + Link link = null; + String result = agent.getLinkLog(link); + assertEquals("", result); + } + + 
@Test + public void testGetLinkLogLinkWithTraceEnabledReturnsLinkLogWithHashCode() { + Link link = mock(Link.class); + InetSocketAddress socketAddress = new InetSocketAddress("192.168.1.100", 1111); + when(link.getSocketAddress()).thenReturn(socketAddress); + when(logger.isTraceEnabled()).thenReturn(true); + + String result = agent.getLinkLog(link); + System.out.println(result); + assertTrue(result.startsWith(System.identityHashCode(link) + "-")); + assertTrue(result.contains("192.168.1.100")); + } + + @Test + public void testGetAgentNameWhenServerResourceIsNull() { + ReflectionTestUtils.setField(agent, "serverResource", null); + assertEquals("Agent", agent.getAgentName()); + } + + @Test + public void testGetAgentNameWhenAppendAgentNameIsTrue() { + when(serverResource.isAppendAgentNameToLogs()).thenReturn(true); + when(serverResource.getName()).thenReturn("TestAgent"); + + String agentName = agent.getAgentName(); + assertEquals("TestAgent", agentName); + } + + @Test + public void testGetAgentNameWhenAppendAgentNameIsFalse() { + when(serverResource.isAppendAgentNameToLogs()).thenReturn(false); + + String agentName = agent.getAgentName(); + assertEquals("Agent", agentName); + } + + @Test + public void testAgentInitialization() { + Runtime.getRuntime().removeShutdownHook(agent.shutdownThread); + when(shell.getPingRetries()).thenReturn(3); + when(shell.getWorkers()).thenReturn(5); + agent.setupShutdownHookAndInitExecutors(); + assertNotNull(agent.selfTaskExecutor); + assertNotNull(agent.outRequestHandler); + assertNotNull(agent.requestHandler); + } + + @Test + public void testAgentShutdownHookAdded() { + Runtime.getRuntime().removeShutdownHook(agent.shutdownThread); + agent.setupShutdownHookAndInitExecutors(); + verify(logger).trace("Adding shutdown hook"); + } + + @Test + public void testGetResourceGuidValidGuidAndResourceName() { + when(shell.getGuid()).thenReturn("12345"); + String result = agent.getResourceGuid(); + assertTrue(result.startsWith("12345-" + 
ServerResource.class.getSimpleName())); + } + + @Test + public void testGetZoneReturnsValidZone() { + when(shell.getZone()).thenReturn("ZoneA"); + String result = agent.getZone(); + assertEquals("ZoneA", result); + } + + @Test + public void testGetPodReturnsValidPod() { + when(shell.getPod()).thenReturn("PodA"); + String result = agent.getPod(); + assertEquals("PodA", result); + } + + @Test + public void testSetLinkAssignsLink() { + Link mockLink = mock(Link.class); + agent.setLink(mockLink); + assertEquals(mockLink, agent.link); + } + + @Test + public void testGetResourceReturnsServerResource() { + ServerResource mockResource = mock(ServerResource.class); + ReflectionTestUtils.setField(agent, "serverResource", mockResource); + ServerResource result = agent.getResource(); + assertSame(mockResource, result); + } + + @Test + public void testGetResourceName() { + String result = agent.getResourceName(); + assertTrue(result.startsWith(ServerResource.class.getSimpleName())); + } + + @Test + public void testUpdateLastPingResponseTimeUpdatesCurrentTime() { + long beforeUpdate = System.currentTimeMillis(); + agent.updateLastPingResponseTime(); + long updatedTime = agent.lastPingResponseTime.get(); + assertTrue(updatedTime >= beforeUpdate); + assertTrue(updatedTime <= System.currentTimeMillis()); + } + + @Test + public void testGetNextSequenceIncrementsSequence() { + long initialSequence = agent.getNextSequence(); + long nextSequence = agent.getNextSequence(); + assertEquals(initialSequence + 1, nextSequence); + long thirdSequence = agent.getNextSequence(); + assertEquals(nextSequence + 1, thirdSequence); + } + + @Test + public void testRegisterControlListenerAddsListener() { + IAgentControlListener listener = mock(IAgentControlListener.class); + agent.registerControlListener(listener); + assertTrue(agent.controlListeners.contains(listener)); + } + + @Test + public void testUnregisterControlListenerRemovesListener() { + IAgentControlListener listener = 
mock(IAgentControlListener.class); + agent.registerControlListener(listener); + assertTrue(agent.controlListeners.contains(listener)); + agent.unregisterControlListener(listener); + assertFalse(agent.controlListeners.contains(listener)); + } + + @Test + public void testCloseAndTerminateLinkLinkIsNullDoesNothing() { + agent.closeAndTerminateLink(null); + } + + @Test + public void testCloseAndTerminateLinkValidLinkCallsCloseAndTerminate() { + Link mockLink = mock(Link.class); + agent.closeAndTerminateLink(mockLink); + verify(mockLink).close(); + verify(mockLink).terminated(); + } + + @Test + public void testStopAndCleanupConnectionConnectionIsNullDoesNothing() { + agent.connection = null; + agent.stopAndCleanupConnection(false); + } + + @Test + public void testStopAndCleanupConnectionValidConnectionNoWaitStopsAndCleansUp() throws IOException { + NioConnection mockConnection = mock(NioConnection.class); + agent.connection = mockConnection; + agent.stopAndCleanupConnection(false); + verify(mockConnection).stop(); + verify(mockConnection).cleanUp(); + } + + @Test + public void testStopAndCleanupConnectionCleanupThrowsIOExceptionLogsWarning() throws IOException { + NioConnection mockConnection = mock(NioConnection.class); + agent.connection = mockConnection; + doThrow(new IOException("Cleanup failed")).when(mockConnection).cleanUp(); + agent.stopAndCleanupConnection(false); + verify(mockConnection).stop(); + verify(logger).warn(eq("Fail to clean up old connection. 
{}"), any(IOException.class)); + } + + @Test + public void testStopAndCleanupConnectionValidConnectionWaitForStopWaitsForStartupToStop() throws IOException { + NioConnection mockConnection = mock(NioConnection.class); + ConstantTimeBackoff mockBackoff = mock(ConstantTimeBackoff.class); + mockBackoff.setTimeToWait(0); + agent.connection = mockConnection; + when(shell.getBackoffAlgorithm()).thenReturn(mockBackoff); + when(mockConnection.isStartup()).thenReturn(true, true, false); + agent.stopAndCleanupConnection(true); + verify(mockConnection).stop(); + verify(mockConnection).cleanUp(); + verify(mockBackoff, times(3)).waitBeforeRetry(); + } +} diff --git a/api/src/main/java/com/cloud/agent/api/to/RemoteInstanceTO.java b/api/src/main/java/com/cloud/agent/api/to/RemoteInstanceTO.java index d86eb2a3a7f..18737c584b3 100644 --- a/api/src/main/java/com/cloud/agent/api/to/RemoteInstanceTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/RemoteInstanceTO.java @@ -27,6 +27,7 @@ public class RemoteInstanceTO implements Serializable { private Hypervisor.HypervisorType hypervisorType; private String instanceName; + private String instancePath; // VMware Remote Instances parameters (required for exporting OVA through ovftool) // TODO: cloud.agent.transport.Request#getCommands() cannot handle gsoc decode for polymorphic classes @@ -44,9 +45,10 @@ public class RemoteInstanceTO implements Serializable { this.instanceName = instanceName; } - public RemoteInstanceTO(String instanceName, String vcenterHost, String vcenterUsername, String vcenterPassword, String datacenterName) { + public RemoteInstanceTO(String instanceName, String instancePath, String vcenterHost, String vcenterUsername, String vcenterPassword, String datacenterName) { this.hypervisorType = Hypervisor.HypervisorType.VMware; this.instanceName = instanceName; + this.instancePath = instancePath; this.vcenterHost = vcenterHost; this.vcenterUsername = vcenterUsername; this.vcenterPassword = vcenterPassword; @@ -61,6 
+63,10 @@ public class RemoteInstanceTO implements Serializable { return this.instanceName; } + public String getInstancePath() { + return this.instancePath; + } + public String getVcenterUsername() { return vcenterUsername; } diff --git a/api/src/main/java/com/cloud/configuration/Resource.java b/api/src/main/java/com/cloud/configuration/Resource.java index bf8fca9d905..c7bf44de76c 100644 --- a/api/src/main/java/com/cloud/configuration/Resource.java +++ b/api/src/main/java/com/cloud/configuration/Resource.java @@ -21,7 +21,7 @@ public interface Resource { short RESOURCE_UNLIMITED = -1; String UNLIMITED = "Unlimited"; - enum ResourceType { // Primary and Secondary storage are allocated_storage and not the physical storage. + enum ResourceType { // All storage type resources are allocated_storage and not the physical storage. user_vm("user_vm", 0), public_ip("public_ip", 1), volume("volume", 2), @@ -33,7 +33,11 @@ public interface Resource { cpu("cpu", 8), memory("memory", 9), primary_storage("primary_storage", 10), - secondary_storage("secondary_storage", 11); + secondary_storage("secondary_storage", 11), + backup("backup", 12), + backup_storage("backup_storage", 13), + bucket("bucket", 14), + object_storage("object_storage", 15); private String name; private int ordinal; @@ -62,6 +66,10 @@ public interface Resource { } return null; } + + public static Boolean isStorageType(ResourceType type) { + return (type == primary_storage || type == secondary_storage || type == backup_storage || type == object_storage); + } } public static class ResourceOwnerType { diff --git a/api/src/main/java/com/cloud/event/EventTypes.java b/api/src/main/java/com/cloud/event/EventTypes.java index 81ed185dae5..862a6e21fa8 100644 --- a/api/src/main/java/com/cloud/event/EventTypes.java +++ b/api/src/main/java/com/cloud/event/EventTypes.java @@ -785,6 +785,9 @@ public class EventTypes { public static final String EVENT_SHAREDFS_EXPUNGE = "SHAREDFS.EXPUNGE"; public static final String 
EVENT_SHAREDFS_RECOVER = "SHAREDFS.RECOVER"; + // Resource Limit + public static final String EVENT_RESOURCE_LIMIT_UPDATE = "RESOURCE.LIMIT.UPDATE"; + static { // TODO: need a way to force author adding event types to declare the entity details as well, with out braking diff --git a/api/src/main/java/com/cloud/exception/StorageAccessException.java b/api/src/main/java/com/cloud/exception/StorageAccessException.java index eefbcf5518a..d54d77d66f1 100644 --- a/api/src/main/java/com/cloud/exception/StorageAccessException.java +++ b/api/src/main/java/com/cloud/exception/StorageAccessException.java @@ -26,7 +26,7 @@ import com.cloud.utils.SerialVersionUID; public class StorageAccessException extends RuntimeException { private static final long serialVersionUID = SerialVersionUID.StorageAccessException; - public StorageAccessException(String message) { - super(message); + public StorageAccessException(String message, Exception causer) { + super(message, causer); } } diff --git a/api/src/main/java/com/cloud/storage/Storage.java b/api/src/main/java/com/cloud/storage/Storage.java index c997f5e1dbf..05b8b3ab7a8 100644 --- a/api/src/main/java/com/cloud/storage/Storage.java +++ b/api/src/main/java/com/cloud/storage/Storage.java @@ -16,14 +16,10 @@ // under the License. package com.cloud.storage; -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; - import org.apache.commons.lang.NotImplementedException; -import org.apache.commons.lang3.StringUtils; + +import java.util.ArrayList; +import java.util.List; public class Storage { public static enum ImageFormat { @@ -139,6 +135,21 @@ public class Storage { ISODISK /* Template corresponding to a iso (non root disk) present in an OVA */ } + public enum EncryptionSupport { + /** + * Encryption not supported. 
+ */ + Unsupported, + /** + * Will use hypervisor encryption driver (qemu -> luks) + */ + Hypervisor, + /** + * Storage pool handles encryption and just provides an encrypted volume + */ + Storage + } + /** * StoragePoolTypes carry some details about the format and capabilities of a storage pool. While not necessarily a * 1:1 with PrimaryDataStoreDriver (and for KVM agent, KVMStoragePool and StorageAdaptor) implementations, it is @@ -150,61 +161,37 @@ public class Storage { * ensure this is available on the agent side as well. This is best done by defining the StoragePoolType in a common * package available on both management server and agent plugin jars. */ - public static class StoragePoolType { - private static final Map map = new LinkedHashMap<>(); + public static enum StoragePoolType { + Filesystem(false, true, EncryptionSupport.Hypervisor), // local directory + NetworkFilesystem(true, true, EncryptionSupport.Hypervisor), // NFS + IscsiLUN(true, false, EncryptionSupport.Unsupported), // shared LUN, with a clusterfs overlay + Iscsi(true, false, EncryptionSupport.Unsupported), // for e.g., ZFS Comstar + ISO(false, false, EncryptionSupport.Unsupported), // for iso image + LVM(false, false, EncryptionSupport.Unsupported), // XenServer local LVM SR + CLVM(true, false, EncryptionSupport.Unsupported), + RBD(true, true, EncryptionSupport.Unsupported), // http://libvirt.org/storage.html#StorageBackendRBD + SharedMountPoint(true, true, EncryptionSupport.Hypervisor), + VMFS(true, true, EncryptionSupport.Unsupported), // VMware VMFS storage + PreSetup(true, true, EncryptionSupport.Unsupported), // for XenServer, Storage Pool is set up by customers. 
+ EXT(false, true, EncryptionSupport.Unsupported), // XenServer local EXT SR + OCFS2(true, false, EncryptionSupport.Unsupported), + SMB(true, false, EncryptionSupport.Unsupported), + Gluster(true, false, EncryptionSupport.Unsupported), + PowerFlex(true, true, EncryptionSupport.Hypervisor), // Dell EMC PowerFlex/ScaleIO (formerly VxFlexOS) + ManagedNFS(true, false, EncryptionSupport.Unsupported), + Linstor(true, true, EncryptionSupport.Storage), + DatastoreCluster(true, true, EncryptionSupport.Unsupported), // for VMware, to abstract pool of clusters + StorPool(true, true, EncryptionSupport.Hypervisor), + FiberChannel(true, true, EncryptionSupport.Unsupported); // Fiber Channel Pool for KVM hypervisors is used to find the volume by WWN value (/dev/disk/by-id/wwn-) - public static final StoragePoolType Filesystem = new StoragePoolType("Filesystem", false, true, true); - public static final StoragePoolType NetworkFilesystem = new StoragePoolType("NetworkFilesystem", true, true, true); - public static final StoragePoolType IscsiLUN = new StoragePoolType("IscsiLUN", true, false, false); - public static final StoragePoolType Iscsi = new StoragePoolType("Iscsi", true, false, false); - public static final StoragePoolType ISO = new StoragePoolType("ISO", false, false, false); - public static final StoragePoolType LVM = new StoragePoolType("LVM", false, false, false); - public static final StoragePoolType CLVM = new StoragePoolType("CLVM", true, false, false); - public static final StoragePoolType RBD = new StoragePoolType("RBD", true, true, false); - public static final StoragePoolType SharedMountPoint = new StoragePoolType("SharedMountPoint", true, true, true); - public static final StoragePoolType VMFS = new StoragePoolType("VMFS", true, true, false); - public static final StoragePoolType PreSetup = new StoragePoolType("PreSetup", true, true, false); - public static final StoragePoolType EXT = new StoragePoolType("EXT", false, true, false); - public static final 
StoragePoolType OCFS2 = new StoragePoolType("OCFS2", true, false, false); - public static final StoragePoolType SMB = new StoragePoolType("SMB", true, false, false); - public static final StoragePoolType Gluster = new StoragePoolType("Gluster", true, false, false); - public static final StoragePoolType PowerFlex = new StoragePoolType("PowerFlex", true, true, true); - public static final StoragePoolType ManagedNFS = new StoragePoolType("ManagedNFS", true, false, false); - public static final StoragePoolType Linstor = new StoragePoolType("Linstor", true, true, false); - public static final StoragePoolType DatastoreCluster = new StoragePoolType("DatastoreCluster", true, true, false); - public static final StoragePoolType StorPool = new StoragePoolType("StorPool", true,true,true); - public static final StoragePoolType FiberChannel = new StoragePoolType("FiberChannel", true,true,false); - - - private final String name; private final boolean shared; private final boolean overProvisioning; - private final boolean encryption; + private final EncryptionSupport encryption; - /** - * New StoragePoolType, set the name to check with it in Dao (Note: Do not register it into the map of pool types). - * @param name name of the StoragePoolType. - */ - public StoragePoolType(String name) { - this.name = name; - this.shared = false; - this.overProvisioning = false; - this.encryption = false; - } - - /** - * Define a new StoragePoolType, and register it into the map of pool types known to the management server. - * @param name Simple unique name of the StoragePoolType. 
- * @param shared Storage pool is shared/accessible to multiple hypervisors - * @param overProvisioning Storage pool supports overProvisioning - * @param encryption Storage pool supports encrypted volumes - */ - public StoragePoolType(String name, boolean shared, boolean overProvisioning, boolean encryption) { - this.name = name; + StoragePoolType(boolean shared, boolean overProvisioning, EncryptionSupport encryption) { this.shared = shared; this.overProvisioning = overProvisioning; this.encryption = encryption; - addStoragePoolType(this); } public boolean isShared() { @@ -216,50 +203,12 @@ public class Storage { } public boolean supportsEncryption() { + return encryption == EncryptionSupport.Hypervisor || encryption == EncryptionSupport.Storage; + } + + public EncryptionSupport encryptionSupportMode() { return encryption; } - - private static void addStoragePoolType(StoragePoolType storagePoolType) { - map.putIfAbsent(storagePoolType.name, storagePoolType); - } - - public static StoragePoolType[] values() { - return map.values().toArray(StoragePoolType[]::new).clone(); - } - - public static StoragePoolType valueOf(String name) { - if (StringUtils.isBlank(name)) { - return null; - } - - StoragePoolType storage = map.get(name); - if (storage == null) { - throw new IllegalArgumentException("StoragePoolType '" + name + "' not found"); - } - return storage; - } - - @Override - public String toString() { - return name; - } - - public String name() { - return name; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - StoragePoolType that = (StoragePoolType) o; - return Objects.equals(name, that.name); - } - - @Override - public int hashCode() { - return Objects.hash(name); - } } public static List getNonSharedStoragePoolTypes() { diff --git a/api/src/main/java/com/cloud/storage/VolumeApiService.java b/api/src/main/java/com/cloud/storage/VolumeApiService.java index 
6f4c7aa09e2..b7b5423244c 100644 --- a/api/src/main/java/com/cloud/storage/VolumeApiService.java +++ b/api/src/main/java/com/cloud/storage/VolumeApiService.java @@ -22,7 +22,12 @@ import java.net.MalformedURLException; import java.util.List; import java.util.Map; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.offering.DiskOffering; +import com.cloud.user.Account; import com.cloud.utils.Pair; +import com.cloud.utils.fsm.NoTransitionException; + import org.apache.cloudstack.api.command.user.volume.AssignVolumeCmd; import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; import org.apache.cloudstack.api.command.user.volume.ChangeOfferingForVolumeCmd; @@ -37,13 +42,9 @@ import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd; import org.apache.cloudstack.api.response.GetUploadParamsResponse; import org.apache.cloudstack.framework.config.ConfigKey; -import com.cloud.exception.ResourceAllocationException; -import com.cloud.user.Account; -import com.cloud.utils.fsm.NoTransitionException; - public interface VolumeApiService { - ConfigKey ConcurrentMigrationsThresholdPerDatastore = new ConfigKey("Advanced" + ConfigKey ConcurrentMigrationsThresholdPerDatastore = new ConfigKey<>("Advanced" , Long.class , "concurrent.migrations.per.target.datastore" , "0" @@ -51,7 +52,7 @@ public interface VolumeApiService { , true // not sure if this is to be dynamic , ConfigKey.Scope.Global); - ConfigKey UseHttpsToUpload = new ConfigKey("Advanced", + ConfigKey UseHttpsToUpload = new ConfigKey<>("Advanced", Boolean.class, "use.https.to.upload", "true", @@ -85,7 +86,7 @@ public interface VolumeApiService { * @param cmd * the API command wrapping the criteria * @return the volume object - * @throws ResourceAllocationException + * @throws ResourceAllocationException no capacity to allocate the new volume size */ Volume resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationException; @@ -139,13 +140,13 @@ public interface VolumeApiService 
{ Snapshot allocSnapshotForVm(Long vmId, Long volumeId, String snapshotName) throws ResourceAllocationException; /** - * Checks if the target storage supports the disk offering. + * Checks if the storage pool supports the disk offering tags. * This validation is consistent with the mechanism used to select a storage pool to deploy a volume when a virtual machine is deployed or when a data disk is allocated. * * The scenarios when this method returns true or false is presented in the following table. * * - * + * * * * @@ -169,7 +170,8 @@ public interface VolumeApiService { * *
#Disk offering tagsStorage tagsDoes the storage support the disk offering?#Disk offering diskOfferingTagsStorage diskOfferingTagsDoes the storage support the disk offering?
*/ - boolean doesTargetStorageSupportDiskOffering(StoragePool destPool, String diskOfferingTags); + boolean doesStoragePoolSupportDiskOffering(StoragePool destPool, DiskOffering diskOffering); + boolean doesStoragePoolSupportDiskOfferingTags(StoragePool destPool, String diskOfferingTags); Volume destroyVolume(long volumeId, Account caller, boolean expunge, boolean forceExpunge); @@ -190,4 +192,6 @@ public interface VolumeApiService { boolean stateTransitTo(Volume vol, Volume.Event event) throws NoTransitionException; Pair checkAndRepairVolume(CheckAndRepairVolumeCmd cmd) throws ResourceAllocationException; + + Long getVolumePhysicalSize(Storage.ImageFormat format, String path, String chainInfo); } diff --git a/api/src/main/java/org/apache/cloudstack/acl/RoleService.java b/api/src/main/java/org/apache/cloudstack/acl/RoleService.java index 68204d43253..f041c8342ae 100644 --- a/api/src/main/java/org/apache/cloudstack/acl/RoleService.java +++ b/api/src/main/java/org/apache/cloudstack/acl/RoleService.java @@ -30,6 +30,11 @@ public interface RoleService { ConfigKey EnableDynamicApiChecker = new ConfigKey<>("Advanced", Boolean.class, "dynamic.apichecker.enabled", "false", "If set to true, this enables the dynamic role-based api access checker and disables the default static role-based api access checker.", true); + ConfigKey DynamicApiCheckerCachePeriod = new ConfigKey<>("Advanced", Integer.class, + "dynamic.apichecker.cache.period", "0", + "Defines the expiration time in seconds for the Dynamic API Checker cache, determining how long cached data is retained before being refreshed. 
If set to zero then caching will be disabled", + false); + boolean isEnabled(); /** diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 03de07c37da..3e8b329cac7 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -51,12 +51,19 @@ public class ApiConstants { public static final String AVAILABLE = "available"; public static final String AVAILABLE_SUBNETS = "availablesubnets"; public static final String AVAILABLE_VIRTUAL_MACHINE_COUNT = "availablevirtualmachinecount"; + public static final String BACKUP_AVAILABLE = "backupavailable"; public static final String BACKUP_ID = "backupid"; + public static final String BACKUP_LIMIT = "backuplimit"; public static final String BACKUP_OFFERING_NAME = "backupofferingname"; public static final String BACKUP_OFFERING_ID = "backupofferingid"; + public static final String BACKUP_STORAGE_AVAILABLE = "backupstorageavailable"; + public static final String BACKUP_STORAGE_LIMIT = "backupstoragelimit"; + public static final String BACKUP_STORAGE_TOTAL = "backupstoragetotal"; + public static final String BACKUP_TOTAL = "backuptotal"; public static final String BASE64_IMAGE = "base64image"; public static final String BGP_PEERS = "bgppeers"; public static final String BGP_PEER_IDS = "bgppeerids"; + public static final String BATCH_SIZE = "batchsize"; public static final String BITS = "bits"; public static final String BOOTABLE = "bootable"; public static final String BIND_DN = "binddn"; @@ -322,6 +329,7 @@ public class ApiConstants { public static final String MAC_ADDRESS = "macaddress"; public static final String MAX = "max"; public static final String MAX_SNAPS = "maxsnaps"; + public static final String MAX_BACKUPS = "maxbackups"; public static final String MAX_CPU_NUMBER = "maxcpunumber"; public static final String MAX_MEMORY = "maxmemory"; public static 
final String MIN_CPU_NUMBER = "mincpunumber"; @@ -436,6 +444,7 @@ public class ApiConstants { public static final String QUALIFIERS = "qualifiers"; public static final String QUERY_FILTER = "queryfilter"; public static final String SCHEDULE = "schedule"; + public static final String SCHEDULE_ID = "scheduleid"; public static final String SCOPE = "scope"; public static final String SEARCH_BASE = "searchbase"; public static final String SECONDARY_IP = "secondaryip"; @@ -447,7 +456,6 @@ public class ApiConstants { public static final String SENT = "sent"; public static final String SENT_BYTES = "sentbytes"; public static final String SERIAL = "serial"; - public static final String SERVICE_IP = "serviceip"; public static final String SERVICE_OFFERING_ID = "serviceofferingid"; public static final String SESSIONKEY = "sessionkey"; public static final String SHOW_CAPACITIES = "showcapacities"; @@ -477,11 +485,12 @@ public class ApiConstants { public static final String STATE = "state"; public static final String STATS = "stats"; public static final String STATUS = "status"; - public static final String STORAGE_TYPE = "storagetype"; - public static final String STORAGE_POLICY = "storagepolicy"; - public static final String STORAGE_MOTION_ENABLED = "storagemotionenabled"; public static final String STORAGE_CAPABILITIES = "storagecapabilities"; public static final String STORAGE_CUSTOM_STATS = "storagecustomstats"; + public static final String STORAGE_MOTION_ENABLED = "storagemotionenabled"; + public static final String STORAGE_POLICY = "storagepolicy"; + public static final String STORAGE_POOL = "storagepool"; + public static final String STORAGE_TYPE = "storagetype"; public static final String SUBNET = "subnet"; public static final String OWNER = "owner"; public static final String SWAP_OWNER = "swapowner"; @@ -954,7 +963,6 @@ public class ApiConstants { public static final String AUTOSCALE_VMGROUP_NAME = "autoscalevmgroupname"; public static final String 
BAREMETAL_DISCOVER_NAME = "baremetaldiscovername"; public static final String BAREMETAL_RCT_URL = "baremetalrcturl"; - public static final String BATCH_SIZE = "batchsize"; public static final String UCS_DN = "ucsdn"; public static final String GSLB_PROVIDER = "gslbprovider"; public static final String EXCLUSIVE_GSLB_PROVIDER = "isexclusivegslbprovider"; @@ -1148,7 +1156,6 @@ public class ApiConstants { public static final String MTU = "mtu"; public static final String AUTO_ENABLE_KVM_HOST = "autoenablekvmhost"; public static final String LIST_APIS = "listApis"; - public static final String OBJECT_STORAGE_ID = "objectstorageid"; public static final String VERSIONING = "versioning"; public static final String OBJECT_LOCKING = "objectlocking"; public static final String ENCRYPTION = "encryption"; @@ -1162,7 +1169,6 @@ public class ApiConstants { public static final String DISK_PATH = "diskpath"; public static final String IMPORT_SOURCE = "importsource"; public static final String TEMP_PATH = "temppath"; - public static final String OBJECT_STORAGE = "objectstore"; public static final String HEURISTIC_RULE = "heuristicrule"; public static final String HEURISTIC_TYPE_VALID_OPTIONS = "Valid options are: ISO, SNAPSHOT, TEMPLATE and VOLUME."; public static final String MANAGEMENT = "management"; @@ -1190,6 +1196,16 @@ public class ApiConstants { public static final String SHAREDFSVM_MIN_CPU_COUNT = "sharedfsvmmincpucount"; public static final String SHAREDFSVM_MIN_RAM_SIZE = "sharedfsvmminramsize"; + // Object Storage related + public static final String BUCKET_AVAILABLE = "bucketavailable"; + public static final String BUCKET_LIMIT = "bucketlimit"; + public static final String BUCKET_TOTAL = "buckettotal"; + public static final String OBJECT_STORAGE_ID = "objectstorageid"; + public static final String OBJECT_STORAGE = "objectstore"; + public static final String OBJECT_STORAGE_AVAILABLE = "objectstorageavailable"; + public static final String OBJECT_STORAGE_LIMIT = 
"objectstoragelimit"; + public static final String OBJECT_STORAGE_TOTAL = "objectstoragetotal"; + public static final String PARAMETER_DESCRIPTION_ACTIVATION_RULE = "Quota tariff's activation rule. It can receive a JS script that results in either " + "a boolean or a numeric value: if it results in a boolean value, the tariff value will be applied according to the result; if it results in a numeric value, the " + "numeric value will be applied; if the result is neither a boolean nor a numeric value, the tariff will not be applied. If the rule is not informed, the tariff " + @@ -1203,6 +1219,8 @@ public class ApiConstants { "however, the following formats are also accepted: \"yyyy-MM-dd HH:mm:ss\" (e.g.: \"2023-01-01 12:00:00\") and \"yyyy-MM-dd\" (e.g.: \"2023-01-01\" - if the time is not " + "added, it will be interpreted as \"23:59:59\"). If the recommended format is not used, the date will be considered in the server timezone."; + public static final String VMWARE_DC = "vmwaredc"; + /** * This enum specifies IO Drivers, each option controls specific policies on I/O. * Qemu guests support "threads" and "native" options Since 0.8.8 ; "io_uring" is supported Since 6.3.0 (QEMU 5.0). 
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java index b91e56dcaef..895e9328992 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java @@ -100,7 +100,7 @@ public class ListDomainsCmd extends BaseListCmd implements UserCmd { dv = EnumSet.of(DomainDetails.all); } else { try { - ArrayList dc = new ArrayList(); + ArrayList dc = new ArrayList<>(); for (String detail : viewDetails) { dc.add(DomainDetails.valueOf(detail)); } @@ -142,7 +142,10 @@ public class ListDomainsCmd extends BaseListCmd implements UserCmd { if (CollectionUtils.isEmpty(response)) { return; } - _resourceLimitService.updateTaggedResourceLimitsAndCountsForDomains(response, getTag()); + EnumSet details = getDetails(); + if (details.contains(DomainDetails.all) || details.contains(DomainDetails.resource)) { + _resourceLimitService.updateTaggedResourceLimitsAndCountsForDomains(response, getTag()); + } if (!getShowIcon()) { return; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/IsAccountAllowedToCreateOfferingsWithTagsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/IsAccountAllowedToCreateOfferingsWithTagsCmd.java index e94bff1fce8..fcd6b03d3e5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/IsAccountAllowedToCreateOfferingsWithTagsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/IsAccountAllowedToCreateOfferingsWithTagsCmd.java @@ -29,7 +29,7 @@ import org.apache.cloudstack.api.response.IsAccountAllowedToCreateOfferingsWithT responseObject = IsAccountAllowedToCreateOfferingsWithTagsResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class 
IsAccountAllowedToCreateOfferingsWithTagsCmd extends BaseCmd { - @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = AccountResponse.class, description = "Account UUID") + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = AccountResponse.class, description = "Account UUID", required = true) private Long id; @Override diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmd.java index 9157188fdee..bd9ab30f4f1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmd.java @@ -157,7 +157,10 @@ public class ListAccountsCmd extends BaseListDomainResourcesCmd implements UserC if (CollectionUtils.isEmpty(response)) { return; } - _resourceLimitService.updateTaggedResourceLimitsAndCountsForAccounts(response, getTag()); + EnumSet details = getDetails(); + if (details.contains(DomainDetails.all) || details.contains(DomainDetails.resource)) { + _resourceLimitService.updateTaggedResourceLimitsAndCountsForAccounts(response, getTag()); + } if (!getShowIcon()) { return; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupCmd.java index 558f92e4006..2d387788243 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupCmd.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.api.command.user.backup; import javax.inject.Inject; +import com.cloud.storage.Snapshot; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -27,6 +28,7 @@ import 
org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.BaseAsyncCreateCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.BackupScheduleResponse; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.backup.BackupManager; @@ -60,6 +62,13 @@ public class CreateBackupCmd extends BaseAsyncCreateCmd { description = "ID of the VM") private Long vmId; + @Parameter(name = ApiConstants.SCHEDULE_ID, + type = CommandType.LONG, + entityType = BackupScheduleResponse.class, + description = "backup schedule ID of the VM, if this is null, it indicates that it is a manual backup.", + since = "4.21.0") + private Long scheduleId; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -68,6 +77,14 @@ public class CreateBackupCmd extends BaseAsyncCreateCmd { return vmId; } + public Long getScheduleId() { + if (scheduleId != null) { + return scheduleId; + } else { + return Snapshot.MANUAL_POLICY_ID; + } + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -75,7 +92,7 @@ public class CreateBackupCmd extends BaseAsyncCreateCmd { @Override public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { try { - boolean result = backupManager.createBackup(getVmId()); + boolean result = backupManager.createBackup(getVmId(), getScheduleId()); if (result) { SuccessResponse response = new SuccessResponse(getCommandName()); response.setResponseName(getCommandName()); diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupScheduleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupScheduleCmd.java index 5dc06af2123..1d0741e6217 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupScheduleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupScheduleCmd.java @@ -75,6 +75,12 @@ public class CreateBackupScheduleCmd extends BaseCmd { description = "Specifies a timezone for this command. For more information on the timezone parameter, see TimeZone Format.") private String timezone; + @Parameter(name = ApiConstants.MAX_BACKUPS, + type = CommandType.INTEGER, + description = "maximum number of backups to retain", + since = "4.21.0") + private Integer maxBackups; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -95,6 +101,10 @@ public class CreateBackupScheduleCmd extends BaseCmd { return timezone; } + public Integer getMaxBackups() { + return maxBackups; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/CreateBucketCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/CreateBucketCmd.java index d2c91e57871..722556b8e2d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/CreateBucketCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/CreateBucketCmd.java @@ -72,7 +72,7 @@ public class CreateBucketCmd extends BaseAsyncCreateCmd implements UserCmd { description = "Id of the Object Storage Pool where bucket is created") private long objectStoragePoolId; - @Parameter(name = ApiConstants.QUOTA, type = CommandType.INTEGER,description = 
"Bucket Quota in GB") + @Parameter(name = ApiConstants.QUOTA, type = CommandType.INTEGER, required = true, description = "Bucket Quota in GiB") private Integer quota; @Parameter(name = ApiConstants.ENCRYPTION, type = CommandType.BOOLEAN, description = "Enable bucket encryption") diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/UpdateBucketCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/UpdateBucketCmd.java index 8e281b20e91..f913373c04b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/UpdateBucketCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/UpdateBucketCmd.java @@ -56,7 +56,7 @@ public class UpdateBucketCmd extends BaseCmd { @Parameter(name = ApiConstants.POLICY, type = CommandType.STRING, description = "Bucket Access Policy") private String policy; - @Parameter(name = ApiConstants.QUOTA, type = CommandType.INTEGER,description = "Bucket Quota in GB") + @Parameter(name = ApiConstants.QUOTA, type = CommandType.INTEGER, description = "Bucket Quota in GiB") private Integer quota; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java index 56c818f832b..efccb5c09b0 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.api.command.user.firewall; import java.util.ArrayList; import java.util.List; +import org.apache.commons.collections.CollectionUtils; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -40,6 +41,7 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.IpAddress; import 
com.cloud.network.rules.FirewallRule; import com.cloud.user.Account; +import com.cloud.utils.StringUtils; import com.cloud.utils.net.NetUtils; @APICommand(name = "createFirewallRule", description = "Creates a firewall rule for a given IP address", responseObject = FirewallResponse.class, entityType = {FirewallRule.class}, @@ -125,14 +127,13 @@ public class CreateFirewallRuleCmd extends BaseAsyncCreateCmd implements Firewal @Override public List getSourceCidrList() { - if (cidrlist != null) { + if (CollectionUtils.isNotEmpty(cidrlist) && !(cidrlist.size() == 1 && StringUtils.isBlank(cidrlist.get(0)))) { return cidrlist; } else { - List oneCidrList = new ArrayList(); + List oneCidrList = new ArrayList<>(); oneCidrList.add(NetUtils.ALL_IP4_CIDRS); return oneCidrList; } - } // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/response/AccountResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/AccountResponse.java index 6fc098295f6..aaad7f985fc 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/AccountResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/AccountResponse.java @@ -127,6 +127,30 @@ public class AccountResponse extends BaseResponse implements ResourceLimitAndCou @Param(description = "the total number of snapshots available for this account") private String snapshotAvailable; + @SerializedName(ApiConstants.BACKUP_LIMIT) + @Param(description = "the total number of backups which can be stored by this account", since = "4.21.0") + private String backupLimit; + + @SerializedName(ApiConstants.BACKUP_TOTAL) + @Param(description = "the total number of backups stored by this account", since = "4.21.0") + private Long backupTotal; + + @SerializedName(ApiConstants.BACKUP_AVAILABLE) + @Param(description = "the total number of backups available to this account", since = "4.21.0") + private String backupAvailable; + + 
@SerializedName(ApiConstants.BACKUP_STORAGE_LIMIT) + @Param(description = "the total backup storage space (in GiB) the account can own", since = "4.21.0") + private String backupStorageLimit; + + @SerializedName(ApiConstants.BACKUP_STORAGE_TOTAL) + @Param(description = "the total backup storage space (in GiB) owned by the account", since = "4.21.0") + private Long backupStorageTotal; + + @SerializedName(ApiConstants.BACKUP_STORAGE_AVAILABLE) + @Param(description = "the total backup storage space (in GiB) available to the account", since = "4.21.0") + private String backupStorageAvailable; + @SerializedName("templatelimit") @Param(description = "the total number of templates which can be created by this account") private String templateLimit; @@ -231,6 +255,30 @@ public class AccountResponse extends BaseResponse implements ResourceLimitAndCou @Param(description = "the total secondary storage space (in GiB) available to be used for this account", since = "4.2.0") private String secondaryStorageAvailable; + @SerializedName(ApiConstants.BUCKET_LIMIT) + @Param(description = "the total number of buckets which can be stored by this account", since = "4.21.0") + private String bucketLimit; + + @SerializedName(ApiConstants.BUCKET_TOTAL) + @Param(description = "the total number of buckets stored by this account", since = "4.21.0") + private Long bucketTotal; + + @SerializedName(ApiConstants.BUCKET_AVAILABLE) + @Param(description = "the total number of buckets available to this account", since = "4.21.0") + private String bucketAvailable; + + @SerializedName(ApiConstants.OBJECT_STORAGE_LIMIT) + @Param(description = "the total object storage space (in GiB) the account can own", since = "4.21.0") + private String objectStorageLimit; + + @SerializedName(ApiConstants.OBJECT_STORAGE_TOTAL) + @Param(description = "the total object storage space (in GiB) owned by the account", since = "4.21.0") + private Long objectStorageTotal; + + 
@SerializedName(ApiConstants.OBJECT_STORAGE_AVAILABLE) + @Param(description = "the total object storage space (in GiB) available to the account", since = "4.21.0") + private String objectStorageAvailable; + @SerializedName(ApiConstants.STATE) @Param(description = "the state of the account") private String state; @@ -386,6 +434,36 @@ public class AccountResponse extends BaseResponse implements ResourceLimitAndCou this.snapshotAvailable = snapshotAvailable; } + @Override + public void setBackupLimit(String backupLimit) { + this.backupLimit = backupLimit; + } + + @Override + public void setBackupTotal(Long backupTotal) { + this.backupTotal = backupTotal; + } + + @Override + public void setBackupAvailable(String backupAvailable) { + this.backupAvailable = backupAvailable; + } + + @Override + public void setBackupStorageLimit(String backupStorageLimit) { + this.backupStorageLimit = backupStorageLimit; + } + + @Override + public void setBackupStorageTotal(Long backupStorageTotal) { + this.backupStorageTotal = backupStorageTotal; + } + + @Override + public void setBackupStorageAvailable(String backupStorageAvailable) { + this.backupStorageAvailable = backupStorageAvailable; + } + @Override public void setTemplateLimit(String templateLimit) { this.templateLimit = templateLimit; @@ -537,6 +615,36 @@ public class AccountResponse extends BaseResponse implements ResourceLimitAndCou this.secondaryStorageAvailable = secondaryStorageAvailable; } + @Override + public void setBucketLimit(String bucketLimit) { + this.bucketLimit = bucketLimit; + } + + @Override + public void setBucketTotal(Long bucketTotal) { + this.bucketTotal = bucketTotal; + } + + @Override + public void setBucketAvailable(String bucketAvailable) { + this.bucketAvailable = bucketAvailable; + } + + @Override + public void setObjectStorageLimit(String objectStorageLimit) { + this.objectStorageLimit = objectStorageLimit; + } + + @Override + public void setObjectStorageTotal(Long objectStorageTotal) { + 
this.objectStorageTotal = objectStorageTotal; + } + + @Override + public void setObjectStorageAvailable(String objectStorageAvailable) { + this.objectStorageAvailable = objectStorageAvailable; + } + public void setDefaultZone(String defaultZoneId) { this.defaultZoneId = defaultZoneId; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/BackupScheduleResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/BackupScheduleResponse.java index ba44f1e024f..d7c6f96add5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/BackupScheduleResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/BackupScheduleResponse.java @@ -37,18 +37,22 @@ public class BackupScheduleResponse extends BaseResponse { @Param(description = "ID of the VM") private String vmId; - @SerializedName("schedule") + @SerializedName(ApiConstants.SCHEDULE) @Param(description = "time the backup is scheduled to be taken.") private String schedule; - @SerializedName("intervaltype") + @SerializedName(ApiConstants.INTERVAL_TYPE) @Param(description = "the interval type of the backup schedule") private DateUtil.IntervalType intervalType; - @SerializedName("timezone") + @SerializedName(ApiConstants.TIMEZONE) @Param(description = "the time zone of the backup schedule") private String timezone; + @SerializedName(ApiConstants.MAX_BACKUPS) + @Param(description = "maximum number of backups retained") + private Integer maxBakups; + public String getVmName() { return vmName; } @@ -88,4 +92,8 @@ public class BackupScheduleResponse extends BaseResponse { public void setTimezone(String timezone) { this.timezone = timezone; } + + public void setMaxBakups(Integer maxBakups) { + this.maxBakups = maxBakups; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/BucketResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/BucketResponse.java index f2dd365452c..cde140839ec 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/response/BucketResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/BucketResponse.java @@ -75,7 +75,7 @@ public class BucketResponse extends BaseResponseWithTagInformation implements Co private String state; @SerializedName(ApiConstants.QUOTA) - @Param(description = "Bucket Quota in GB") + @Param(description = "Bucket Quota in GiB") private Integer quota; @SerializedName(ApiConstants.ENCRYPTION) diff --git a/api/src/main/java/org/apache/cloudstack/api/response/DomainResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/DomainResponse.java index 7c6ad3a91c3..74fa2cbb1e4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/DomainResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/DomainResponse.java @@ -105,6 +105,30 @@ public class DomainResponse extends BaseResponseWithAnnotations implements Resou @SerializedName("snapshotavailable") @Param(description="the total number of snapshots available for this domain") private String snapshotAvailable; + @SerializedName(ApiConstants.BACKUP_LIMIT) + @Param(description = "the total number of backups which can be stored by this domain", since = "4.21.0") + private String backupLimit; + + @SerializedName(ApiConstants.BACKUP_TOTAL) + @Param(description = "the total number of backups stored by this domain", since = "4.21.0") + private Long backupTotal; + + @SerializedName(ApiConstants.BACKUP_AVAILABLE) + @Param(description = "the total number of backups available to this domain", since = "4.21.0") + private String backupAvailable; + + @SerializedName(ApiConstants.BACKUP_STORAGE_LIMIT) + @Param(description = "the total backup storage space (in GiB) the domain can own", since = "4.21.0") + private String backupStorageLimit; + + @SerializedName(ApiConstants.BACKUP_STORAGE_TOTAL) + @Param(description = "the total backup storage space (in GiB) owned by the domain", since = "4.21.0") + private Long backupStorageTotal; + + 
@SerializedName(ApiConstants.BACKUP_STORAGE_AVAILABLE) + @Param(description = "the total backup storage space (in GiB) available to the domain", since = "4.21.0") + private String backupStorageAvailable; + @SerializedName("templatelimit") @Param(description="the total number of templates which can be created by this domain") private String templateLimit; @@ -177,6 +201,30 @@ public class DomainResponse extends BaseResponseWithAnnotations implements Resou @SerializedName("secondarystorageavailable") @Param(description="the total secondary storage space (in GiB) available to be used for this domain", since="4.2.0") private String secondaryStorageAvailable; + @SerializedName(ApiConstants.BUCKET_LIMIT) + @Param(description = "the total number of buckets which can be stored by this domain", since = "4.21.0") + private String bucketLimit; + + @SerializedName(ApiConstants.BUCKET_TOTAL) + @Param(description = "the total number of buckets stored by this domain", since = "4.21.0") + private Long bucketTotal; + + @SerializedName(ApiConstants.BUCKET_AVAILABLE) + @Param(description = "the total number of buckets available to this domain", since = "4.21.0") + private String bucketAvailable; + + @SerializedName(ApiConstants.OBJECT_STORAGE_LIMIT) + @Param(description = "the total object storage space (in GiB) the domain can own", since = "4.21.0") + private String objectStorageLimit; + + @SerializedName(ApiConstants.OBJECT_STORAGE_TOTAL) + @Param(description = "the total object storage space (in GiB) owned by the domain", since = "4.21.0") + private Long objectStorageTotal; + + @SerializedName(ApiConstants.OBJECT_STORAGE_AVAILABLE) + @Param(description = "the total object storage space (in GiB) available to the domain", since = "4.21.0") + private String objectStorageAvailable; + @SerializedName(ApiConstants.RESOURCE_ICON) @Param(description = "Base64 string representation of the resource icon", since = "4.16.0.0") ResourceIconResponse icon; @@ -313,6 +361,36 @@ public class 
DomainResponse extends BaseResponseWithAnnotations implements Resou this.snapshotAvailable = snapshotAvailable; } + @Override + public void setBackupLimit(String backupLimit) { + this.backupLimit = backupLimit; + } + + @Override + public void setBackupTotal(Long backupTotal) { + this.backupTotal = backupTotal; + } + + @Override + public void setBackupAvailable(String backupAvailable) { + this.backupAvailable = backupAvailable; + } + + @Override + public void setBackupStorageLimit(String backupStorageLimit) { + this.backupStorageLimit = backupStorageLimit; + } + + @Override + public void setBackupStorageTotal(Long backupStorageTotal) { + this.backupStorageTotal = backupStorageTotal; + } + + @Override + public void setBackupStorageAvailable(String backupStorageAvailable) { + this.backupStorageAvailable = backupStorageAvailable; + } + @Override public void setTemplateLimit(String templateLimit) { this.templateLimit = templateLimit; @@ -430,6 +508,36 @@ public class DomainResponse extends BaseResponseWithAnnotations implements Resou this.secondaryStorageAvailable = secondaryStorageAvailable; } + @Override + public void setBucketLimit(String bucketLimit) { + this.bucketLimit = bucketLimit; + } + + @Override + public void setBucketTotal(Long bucketTotal) { + this.bucketTotal = bucketTotal; + } + + @Override + public void setBucketAvailable(String bucketAvailable) { + this.bucketAvailable = bucketAvailable; + } + + @Override + public void setObjectStorageLimit(String objectStorageLimit) { + this.objectStorageLimit = objectStorageLimit; + } + + @Override + public void setObjectStorageTotal(Long objectStorageTotal) { + this.objectStorageTotal = objectStorageTotal; + } + + @Override + public void setObjectStorageAvailable(String objectStorageAvailable) { + this.objectStorageAvailable = objectStorageAvailable; + } + public void setState(String state) { this.state = state; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java 
b/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java index 091d6391b31..c9a5c47887d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java @@ -152,7 +152,7 @@ public class HostResponse extends BaseResponseWithAnnotations { @Deprecated @SerializedName("memoryallocated") @Param(description = "the amount of the host's memory currently allocated") - private long memoryAllocated; + private Long memoryAllocated; @SerializedName("memoryallocatedpercentage") @Param(description = "the amount of the host's memory currently allocated in percentage") @@ -415,7 +415,7 @@ public class HostResponse extends BaseResponseWithAnnotations { this.memWithOverprovisioning=memWithOverprovisioning; } - public void setMemoryAllocated(long memoryAllocated) { + public void setMemoryAllocated(Long memoryAllocated) { this.memoryAllocated = memoryAllocated; } @@ -703,8 +703,8 @@ public class HostResponse extends BaseResponseWithAnnotations { return memoryTotal; } - public long getMemoryAllocated() { - return memoryAllocated; + public Long getMemoryAllocated() { + return memoryAllocated == null ? 
0 : memoryAllocated; } public void setMemoryAllocatedPercentage(String memoryAllocatedPercentage) { diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ManagementServerResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ManagementServerResponse.java index df55a63a060..729fb5ff3bc 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ManagementServerResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ManagementServerResponse.java @@ -74,9 +74,9 @@ public class ManagementServerResponse extends BaseResponse { @Param(description = "the running OS kernel version for this Management Server") private String kernelVersion; - @SerializedName(ApiConstants.SERVICE_IP) + @SerializedName(ApiConstants.IP_ADDRESS) @Param(description = "the IP Address for this Management Server") - private String serviceIp; + private String ipAddress; @SerializedName(ApiConstants.PEERS) @Param(description = "the Management Server Peers") @@ -130,8 +130,8 @@ public class ManagementServerResponse extends BaseResponse { return lastBoot; } - public String getServiceIp() { - return serviceIp; + public String getIpAddress() { + return ipAddress; } public Long getAgentsCount() { @@ -186,8 +186,8 @@ public class ManagementServerResponse extends BaseResponse { this.kernelVersion = kernelVersion; } - public void setServiceIp(String serviceIp) { - this.serviceIp = serviceIp; + public void setIpAddress(String ipAddress) { + this.ipAddress = ipAddress; } public void setAgentsCount(Long agentsCount) { diff --git a/api/src/main/java/org/apache/cloudstack/api/response/NetworkResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/NetworkResponse.java index a80317c83cd..db811ffbe2d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/NetworkResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/NetworkResponse.java @@ -196,6 +196,10 @@ public class NetworkResponse extends BaseResponseWithAssociatedNetwork 
implement @Param(description = "true network requires restart") private Boolean restartRequired; + @SerializedName(ApiConstants.SPECIFY_VLAN) + @Param(description = "true if network supports specifying vlan, false otherwise") + private Boolean specifyVlan; + @SerializedName(ApiConstants.SPECIFY_IP_RANGES) @Param(description = "true if network supports specifying ip ranges, false otherwise") private Boolean specifyIpRanges; @@ -516,6 +520,10 @@ public class NetworkResponse extends BaseResponseWithAssociatedNetwork implement this.restartRequired = restartRequired; } + public void setSpecifyVlan(Boolean specifyVlan) { + this.specifyVlan = specifyVlan; + } + public void setSpecifyIpRanges(Boolean specifyIpRanges) { this.specifyIpRanges = specifyIpRanges; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ProjectResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ProjectResponse.java index 1c63697559b..8bdf042add0 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ProjectResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ProjectResponse.java @@ -140,6 +140,30 @@ public class ProjectResponse extends BaseResponse implements ResourceLimitAndCou @Param(description = "the total secondary storage space (in GiB) available to be used for this project", since = "4.2.0") private String secondaryStorageAvailable; + @SerializedName(ApiConstants.BUCKET_LIMIT) + @Param(description = "the total number of buckets which can be stored by this project", since = "4.21.0") + private String bucketLimit; + + @SerializedName(ApiConstants.BUCKET_TOTAL) + @Param(description = "the total number of buckets stored by this project", since = "4.21.0") + private Long bucketTotal; + + @SerializedName(ApiConstants.BUCKET_AVAILABLE) + @Param(description = "the total number of buckets available to this project", since = "4.21.0") + private String bucketAvailable; + + @SerializedName(ApiConstants.OBJECT_STORAGE_LIMIT) + 
@Param(description = "the total object storage space (in GiB) the project can own", since = "4.21.0") + private String objectStorageLimit; + + @SerializedName(ApiConstants.OBJECT_STORAGE_TOTAL) + @Param(description = "the total object storage space (in GiB) owned by the project", since = "4.21.0") + private Long objectStorageTotal; + + @SerializedName(ApiConstants.OBJECT_STORAGE_AVAILABLE) + @Param(description = "the total object storage space (in GiB) available to the project", since = "4.21.0") + private String objectStorageAvailable; + @SerializedName(ApiConstants.VM_LIMIT) @Param(description = "the total number of virtual machines that can be deployed by this project", since = "4.2.0") private String vmLimit; @@ -188,6 +212,30 @@ public class ProjectResponse extends BaseResponse implements ResourceLimitAndCou @Param(description = "the total number of snapshots available for this project", since = "4.2.0") private String snapshotAvailable; + @SerializedName(ApiConstants.BACKUP_LIMIT) + @Param(description = "the total number of backups which can be stored by this project", since = "4.21.0") + private String backupLimit; + + @SerializedName(ApiConstants.BACKUP_TOTAL) + @Param(description = "the total number of backups stored by this project", since = "4.21.0") + private Long backupTotal; + + @SerializedName(ApiConstants.BACKUP_AVAILABLE) + @Param(description = "the total number of backups available to this project", since = "4.21.0") + private String backupAvailable; + + @SerializedName(ApiConstants.BACKUP_STORAGE_LIMIT) + @Param(description = "the total backup storage space (in GiB) the project can own", since = "4.21.0") + private String backupStorageLimit; + + @SerializedName(ApiConstants.BACKUP_STORAGE_TOTAL) + @Param(description = "the total backup storage space (in GiB) owned by the project", since = "4.21.0") + private Long backupStorageTotal; + + @SerializedName(ApiConstants.BACKUP_STORAGE_AVAILABLE) + @Param(description = "the total backup storage space 
(in GiB) available to the project", since = "4.21.0") + private String backupStorageAvailable; + @SerializedName("templatelimit") @Param(description = "the total number of templates which can be created by this project", since = "4.2.0") private String templateLimit; @@ -320,6 +368,36 @@ public class ProjectResponse extends BaseResponse implements ResourceLimitAndCou this.snapshotAvailable = snapshotAvailable; } + @Override + public void setBackupLimit(String backupLimit) { + this.backupLimit = backupLimit; + } + + @Override + public void setBackupTotal(Long backupTotal) { + this.backupTotal = backupTotal; + } + + @Override + public void setBackupAvailable(String backupAvailable) { + this.backupAvailable = backupAvailable; + } + + @Override + public void setBackupStorageLimit(String backupStorageLimit) { + this.backupStorageLimit = backupStorageLimit; + } + + @Override + public void setBackupStorageTotal(Long backupStorageTotal) { + this.backupStorageTotal = backupStorageTotal; + } + + @Override + public void setBackupStorageAvailable(String backupStorageAvailable) { + this.backupStorageAvailable = backupStorageAvailable; + } + @Override public void setTemplateLimit(String templateLimit) { this.templateLimit = templateLimit; @@ -435,6 +513,36 @@ public class ProjectResponse extends BaseResponse implements ResourceLimitAndCou this.secondaryStorageAvailable = secondaryStorageAvailable; } + @Override + public void setBucketLimit(String bucketLimit) { + this.bucketLimit = bucketLimit; + } + + @Override + public void setBucketTotal(Long bucketTotal) { + this.bucketTotal = bucketTotal; + } + + @Override + public void setBucketAvailable(String bucketAvailable) { + this.bucketAvailable = bucketAvailable; + } + + @Override + public void setObjectStorageLimit(String objectStorageLimit) { + this.objectStorageLimit = objectStorageLimit; + } + + @Override + public void setObjectStorageTotal(Long objectStorageTotal) { + this.objectStorageTotal = objectStorageTotal; + } + + 
@Override + public void setObjectStorageAvailable(String objectStorageAvailable) { + this.objectStorageAvailable = objectStorageAvailable; + } + public void setOwners(List> owners) { this.owners = owners; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ResourceLimitAndCountResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ResourceLimitAndCountResponse.java index f9e6df3a038..b86723b36c4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ResourceLimitAndCountResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ResourceLimitAndCountResponse.java @@ -84,6 +84,30 @@ public interface ResourceLimitAndCountResponse { public void setSnapshotAvailable(String snapshotAvailable); + public void setBackupLimit(String backupLimit); + + public void setBackupTotal(Long backupTotal); + + public void setBackupAvailable(String backupAvailable); + + public void setBackupStorageLimit(String backupStorageLimit); + + public void setBackupStorageTotal(Long backupStorageTotal); + + public void setBackupStorageAvailable(String backupStorageAvailable); + + void setBucketLimit(String bucketLimit); + + void setBucketTotal(Long bucketTotal); + + void setBucketAvailable(String bucketAvailable); + + void setObjectStorageLimit(String objectStorageLimit); + + void setObjectStorageTotal(Long objectStorageTotal); + + void setObjectStorageAvailable(String objectStorageAvailable); + public void setTemplateLimit(String templateLimit); public void setTemplateTotal(Long templateTotal); diff --git a/api/src/main/java/org/apache/cloudstack/backup/Backup.java b/api/src/main/java/org/apache/cloudstack/backup/Backup.java index f21f20adb33..dffe8a03213 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/Backup.java +++ b/api/src/main/java/org/apache/cloudstack/backup/Backup.java @@ -33,6 +33,28 @@ public interface Backup extends ControlledEntity, InternalIdentity, Identity { Allocated, Queued, BackingUp, BackedUp, Error, Failed, 
Restoring, Removed, Expunged } + public enum Type { + MANUAL, HOURLY, DAILY, WEEKLY, MONTHLY; + private int max = 8; + + public void setMax(int max) { + this.max = max; + } + + public int getMax() { + return max; + } + + @Override + public String toString() { + return this.name(); + } + + public boolean equals(String snapshotType) { + return this.toString().equalsIgnoreCase(snapshotType); + } + } + class Metric { private Long backupSize = 0L; private Long dataSize = 0L; diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java b/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java index 8b45bb4ee5e..cbd4b7e0596 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.backup; import java.util.List; +import com.cloud.exception.ResourceAllocationException; import org.apache.cloudstack.api.command.admin.backup.ImportBackupOfferingCmd; import org.apache.cloudstack.api.command.admin.backup.UpdateBackupOfferingCmd; import org.apache.cloudstack.api.command.user.backup.CreateBackupScheduleCmd; @@ -56,6 +57,86 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer "false", "Enable volume attach/detach operations for VMs that are assigned to Backup Offerings.", true); + ConfigKey BackupHourlyMax = new ConfigKey("Advanced", Integer.class, + "backup.max.hourly", + "8", + "Maximum recurring hourly backups to be retained for an instance. If the limit is reached, early backups from the start of the hour are deleted so that newer ones can be saved. This limit does not apply to manual backups. If set to 0, recurring hourly backups can not be scheduled.", + false, + ConfigKey.Scope.Global, + null); + + ConfigKey BackupDailyMax = new ConfigKey("Advanced", Integer.class, + "backup.max.daily", + "8", + "Maximum recurring daily backups to be retained for an instance. 
If the limit is reached, backups from the start of the day are deleted so that newer ones can be saved. This limit does not apply to manual backups. If set to 0, recurring daily backups can not be scheduled.", + false, + ConfigKey.Scope.Global, + null); + + ConfigKey BackupWeeklyMax = new ConfigKey("Advanced", Integer.class, + "backup.max.weekly", + "8", + "Maximum recurring weekly backups to be retained for an instance. If the limit is reached, backups from the beginning of the week are deleted so that newer ones can be saved. This limit does not apply to manual backups. If set to 0, recurring weekly backups can not be scheduled.", + false, + ConfigKey.Scope.Global, + null); + + ConfigKey BackupMonthlyMax = new ConfigKey("Advanced", Integer.class, + "backup.max.monthly", + "8", + "Maximum recurring monthly backups to be retained for an instance. If the limit is reached, backups from the beginning of the month are deleted so that newer ones can be saved. This limit does not apply to manual backups. 
If set to 0, recurring monthly backups can not be scheduled.", + false, + ConfigKey.Scope.Global, + null); + + ConfigKey DefaultMaxAccountBackups = new ConfigKey("Account Defaults", Long.class, + "max.account.backups", + "20", + "The default maximum number of backups that can be created for an account", + false, + ConfigKey.Scope.Global, + null); + + ConfigKey DefaultMaxAccountBackupStorage = new ConfigKey("Account Defaults", Long.class, + "max.account.backup.storage", + "400", + "The default maximum backup storage space (in GiB) that can be used for an account", + false, + ConfigKey.Scope.Global, + null); + + ConfigKey DefaultMaxProjectBackups = new ConfigKey("Project Defaults", Long.class, + "max.project.backups", + "20", + "The default maximum number of backups that can be created for a project", + false, + ConfigKey.Scope.Global, + null); + + ConfigKey DefaultMaxProjectBackupStorage = new ConfigKey("Project Defaults", Long.class, + "max.project.backup.storage", + "400", + "The default maximum backup storage space (in GiB) that can be used for a project", + false, + ConfigKey.Scope.Global, + null); + + ConfigKey DefaultMaxDomainBackups = new ConfigKey("Domain Defaults", Long.class, + "max.domain.backups", + "40", + "The default maximum number of backups that can be created for a domain", + false, + ConfigKey.Scope.Global, + null); + + ConfigKey DefaultMaxDomainBackupStorage = new ConfigKey("Domain Defaults", Long.class, + "max.domain.backup.storage", + "800", + "The default maximum backup storage space (in GiB) that can be used for a domain", + false, + ConfigKey.Scope.Global, + null); + /** * List backup provider offerings * @param zoneId zone id @@ -119,9 +200,10 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer /** * Creates backup of a VM * @param vmId Virtual Machine ID + * @param scheduleId Virtual Machine Backup Schedule ID * @return returns operation success */ - boolean createBackup(final Long vmId); + boolean 
createBackup(final Long vmId, final Long scheduleId) throws ResourceAllocationException; /** * List existing backups for a VM diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java b/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java index d36dfb7360f..e3a6c3a62bd 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java @@ -75,7 +75,7 @@ public interface BackupProvider { * @param backup * @return */ - boolean takeBackup(VirtualMachine vm); + Pair takeBackup(VirtualMachine vm); /** * Delete an existing backup @@ -104,9 +104,16 @@ public interface BackupProvider { Map getBackupMetrics(Long zoneId, List vms); /** - * This method should reconcile and create backup entries for any backups created out-of-band - * @param vm + * This method should TODO + * @param + */ + public List listRestorePoints(VirtualMachine vm); + + /** + * This method should TODO + * @param + * @param * @param metric */ - void syncBackups(VirtualMachine vm, Backup.Metric metric); + Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm, Backup.Metric metric); } diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupSchedule.java b/api/src/main/java/org/apache/cloudstack/backup/BackupSchedule.java index d81dd731b1f..4ff946be9cd 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/BackupSchedule.java +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupSchedule.java @@ -30,4 +30,5 @@ public interface BackupSchedule extends InternalIdentity { String getTimezone(); Date getScheduledTimestamp(); Long getAsyncJobId(); + Integer getMaxBackups(); } diff --git a/api/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementService.java b/api/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementService.java index d670e4d3a88..4f6f1ad66c9 100644 --- 
a/api/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementService.java +++ b/api/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementService.java @@ -39,7 +39,7 @@ public interface OutOfBandManagementService { long getId(); boolean isOutOfBandManagementEnabled(Host host); void submitBackgroundPowerSyncTask(Host host); - boolean transitionPowerStateToDisabled(List hosts); + boolean transitionPowerStateToDisabled(List hostIds); OutOfBandManagementResponse enableOutOfBandManagement(DataCenter zone); OutOfBandManagementResponse enableOutOfBandManagement(Cluster cluster); diff --git a/api/src/main/java/org/apache/cloudstack/storage/object/BucketApiService.java b/api/src/main/java/org/apache/cloudstack/storage/object/BucketApiService.java index 7e1361d1e71..e27ef308d7f 100644 --- a/api/src/main/java/org/apache/cloudstack/storage/object/BucketApiService.java +++ b/api/src/main/java/org/apache/cloudstack/storage/object/BucketApiService.java @@ -22,10 +22,59 @@ import com.cloud.exception.ResourceAllocationException; import com.cloud.user.Account; import org.apache.cloudstack.api.command.user.bucket.CreateBucketCmd; import org.apache.cloudstack.api.command.user.bucket.UpdateBucketCmd; +import org.apache.cloudstack.framework.config.ConfigKey; public interface BucketApiService { + ConfigKey DefaultMaxAccountBuckets = new ConfigKey("Account Defaults", Long.class, + "max.account.buckets", + "20", + "The default maximum number of buckets that can be created for an account", + false, + ConfigKey.Scope.Global, + null); + + ConfigKey DefaultMaxAccountObjectStorage = new ConfigKey("Account Defaults", Long.class, + "max.account.object.storage", + "400", + "The default maximum object storage space (in GiB) that can be used for an account", + false, + ConfigKey.Scope.Global, + null); + + ConfigKey DefaultMaxProjectBuckets = new ConfigKey("Project Defaults", Long.class, + "max.project.buckets", + "20", + "The default maximum number of buckets 
that can be created for a project", + false, + ConfigKey.Scope.Global, + null); + + ConfigKey DefaultMaxProjectObjectStorage = new ConfigKey("Project Defaults", Long.class, + "max.project.object.storage", + "400", + "The default maximum object storage space (in GiB) that can be used for a project", + false, + ConfigKey.Scope.Global, + null); + + ConfigKey DefaultMaxDomainBuckets = new ConfigKey("Domain Defaults", Long.class, + "max.domain.buckets", + "20", + "The default maximum number of buckets that can be created for a domain", + false, + ConfigKey.Scope.Global, + null); + + ConfigKey DefaultMaxDomainObjectStorage = new ConfigKey("Domain Defaults", Long.class, + "max.domain.object.storage", + "400", + "The default maximum object storage space (in GiB) that can be used for a domain", + false, + ConfigKey.Scope.Global, + null); + /** * Creates the database object for a Bucket based on the given criteria * @@ -48,7 +97,7 @@ public interface BucketApiService { boolean deleteBucket(long bucketId, Account caller); - boolean updateBucket(UpdateBucketCmd cmd, Account caller); + boolean updateBucket(UpdateBucketCmd cmd, Account caller) throws ResourceAllocationException; void getBucketUsage(); } diff --git a/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java b/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java index 0802098cb4f..3d5646f68c9 100644 --- a/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java +++ b/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java @@ -33,6 +33,8 @@ public class UnmanagedInstanceTO { private String internalCSName; + private String path; + private PowerState powerState; private PowerState cloneSourcePowerState; @@ -75,6 +77,14 @@ public class UnmanagedInstanceTO { this.internalCSName = internalCSName; } + public String getPath() { + return path; + } + + public void setPath(String path) { + this.path = path; + } + public PowerState getPowerState() { return powerState; } diff 
--git a/api/src/test/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmdTest.java index 3c9d4cb67ae..45f175e9a81 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmdTest.java @@ -18,6 +18,7 @@ package org.apache.cloudstack.api.command.admin.domain; import java.util.List; +import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.response.DomainResponse; import org.junit.Assert; import org.junit.Test; @@ -71,7 +72,17 @@ public class ListDomainsCmdTest { cmd._resourceLimitService = resourceLimitService; ReflectionTestUtils.setField(cmd, "tag", "abc"); cmd.updateDomainResponse(List.of(Mockito.mock(DomainResponse.class))); - Mockito.verify(resourceLimitService, Mockito.times(1)).updateTaggedResourceLimitsAndCountsForDomains(Mockito.any(), Mockito.any()); + Mockito.verify(resourceLimitService).updateTaggedResourceLimitsAndCountsForDomains(Mockito.any(), Mockito.any()); + } + + @Test + public void testUpdateDomainResponseWithDomainsMinDetails() { + ListDomainsCmd cmd = new ListDomainsCmd(); + ReflectionTestUtils.setField(cmd, "viewDetails", List.of(ApiConstants.DomainDetails.min.toString())); + cmd._resourceLimitService = resourceLimitService; + ReflectionTestUtils.setField(cmd, "tag", "abc"); + cmd.updateDomainResponse(List.of(Mockito.mock(DomainResponse.class))); + Mockito.verify(resourceLimitService, Mockito.never()).updateTaggedResourceLimitsAndCountsForDomains(Mockito.any(), Mockito.any()); } } diff --git a/api/src/test/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmdTest.java index 896a7a6c826..a1ba9270345 100644 --- 
a/api/src/test/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmdTest.java @@ -18,6 +18,7 @@ package org.apache.cloudstack.api.command.user.account; import java.util.List; +import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.response.AccountResponse; import org.junit.Assert; import org.junit.Test; @@ -58,7 +59,7 @@ public class ListAccountsCmdTest { } @Test - public void testUpdateDomainResponseNoDomains() { + public void testUpdateAccountResponseNoAccounts() { ListAccountsCmd cmd = new ListAccountsCmd(); cmd._resourceLimitService = resourceLimitService; cmd.updateAccountResponse(null); @@ -66,11 +67,21 @@ public class ListAccountsCmdTest { } @Test - public void testUpdateDomainResponseWithDomains() { + public void testUpdateDomainResponseWithAccounts() { ListAccountsCmd cmd = new ListAccountsCmd(); cmd._resourceLimitService = resourceLimitService; ReflectionTestUtils.setField(cmd, "tag", "abc"); cmd.updateAccountResponse(List.of(Mockito.mock(AccountResponse.class))); Mockito.verify(resourceLimitService, Mockito.times(1)).updateTaggedResourceLimitsAndCountsForAccounts(Mockito.any(), Mockito.any()); } + + @Test + public void testUpdateDomainResponseWithAccountsMinDetails() { + ListAccountsCmd cmd = new ListAccountsCmd(); + ReflectionTestUtils.setField(cmd, "viewDetails", List.of(ApiConstants.DomainDetails.min.toString())); + cmd._resourceLimitService = resourceLimitService; + ReflectionTestUtils.setField(cmd, "tag", "abc"); + cmd.updateAccountResponse(List.of(Mockito.mock(AccountResponse.class))); + Mockito.verify(resourceLimitService, Mockito.never()).updateTaggedResourceLimitsAndCountsForAccounts(Mockito.any(), Mockito.any()); + } } diff --git a/api/src/test/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmdTest.java 
b/api/src/test/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmdTest.java new file mode 100644 index 00000000000..c905974b2be --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmdTest.java @@ -0,0 +1,91 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.user.firewall; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import org.apache.commons.collections.CollectionUtils; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.utils.net.NetUtils; + +@RunWith(MockitoJUnitRunner.class) +public class CreateFirewallRuleCmdTest { + + private void validateAllIp4Cidr(final CreateFirewallRuleCmd cmd) { + Assert.assertTrue(CollectionUtils.isNotEmpty(cmd.getSourceCidrList())); + Assert.assertEquals(1, cmd.getSourceCidrList().size()); + Assert.assertEquals(NetUtils.ALL_IP4_CIDRS, cmd.getSourceCidrList().get(0)); + } + + @Test + public void testGetSourceCidrList_Null() { + final CreateFirewallRuleCmd cmd = new CreateFirewallRuleCmd(); + ReflectionTestUtils.setField(cmd, "cidrlist", null); + validateAllIp4Cidr(cmd); + } + + @Test + public void testGetSourceCidrList_Empty() { + final CreateFirewallRuleCmd cmd = new CreateFirewallRuleCmd(); + ReflectionTestUtils.setField(cmd, "cidrlist", new ArrayList<>()); + validateAllIp4Cidr(cmd); + } + + @Test + public void testGetSourceCidrList_NullFirstElement() { + final CreateFirewallRuleCmd cmd = new CreateFirewallRuleCmd(); + List list = new ArrayList<>(); + list.add(null); + ReflectionTestUtils.setField(cmd, "cidrlist", list); + validateAllIp4Cidr(cmd); + } + + @Test + public void testGetSourceCidrList_EmptyFirstElement() { + final CreateFirewallRuleCmd cmd = new CreateFirewallRuleCmd(); + ReflectionTestUtils.setField(cmd, "cidrlist", Collections.singletonList(" ")); + validateAllIp4Cidr(cmd); + } + + @Test + public void testGetSourceCidrList_Valid() { + final CreateFirewallRuleCmd cmd = new CreateFirewallRuleCmd(); + String cidr = "10.1.1.1/22"; + ReflectionTestUtils.setField(cmd, "cidrlist", Collections.singletonList(cidr)); + 
Assert.assertTrue(CollectionUtils.isNotEmpty(cmd.getSourceCidrList())); + Assert.assertEquals(1, cmd.getSourceCidrList().size()); + Assert.assertEquals(cidr, cmd.getSourceCidrList().get(0)); + } + + @Test + public void testGetSourceCidrList_EmptyFirstElementButMore() { + final CreateFirewallRuleCmd cmd = new CreateFirewallRuleCmd(); + String cidr = "10.1.1.1/22"; + ReflectionTestUtils.setField(cmd, "cidrlist", Arrays.asList(" ", cidr)); + Assert.assertTrue(CollectionUtils.isNotEmpty(cmd.getSourceCidrList())); + Assert.assertEquals(2, cmd.getSourceCidrList().size()); + Assert.assertEquals(cidr, cmd.getSourceCidrList().get(1)); + } +} diff --git a/core/src/main/java/com/cloud/agent/api/CleanupVMCommand.java b/core/src/main/java/com/cloud/agent/api/CleanupVMCommand.java new file mode 100644 index 00000000000..a4d73a8b164 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/CleanupVMCommand.java @@ -0,0 +1,46 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.agent.api; + +/** + * This command will destroy a leftover VM during the expunge process if it wasn't destroyed before. 
+ * + */ +public class CleanupVMCommand extends Command { + String vmName; + boolean executeInSequence; + + public CleanupVMCommand(String vmName) { + this(vmName, false); + } + public CleanupVMCommand(String vmName, boolean executeInSequence) { + this.vmName = vmName; + this.executeInSequence = executeInSequence; + } + + @Override + public boolean executeInSequence() { + return executeInSequence; + } + + public String getVmName() { + return vmName; + } +} diff --git a/core/src/main/java/com/cloud/resource/ServerResource.java b/core/src/main/java/com/cloud/resource/ServerResource.java index 981f03b738a..845ac8a48fa 100644 --- a/core/src/main/java/com/cloud/resource/ServerResource.java +++ b/core/src/main/java/com/cloud/resource/ServerResource.java @@ -82,4 +82,12 @@ public interface ServerResource extends Manager { void setAgentControl(IAgentControl agentControl); + default boolean isExitOnFailures() { + return true; + } + + default boolean isAppendAgentNameToLogs() { + return false; + } + } diff --git a/debian/control b/debian/control index a773844c27c..1292639ef30 100644 --- a/debian/control +++ b/debian/control @@ -1,7 +1,7 @@ Source: cloudstack Section: libs Priority: extra -Maintainer: Wido den Hollander +Maintainer: The Apache CloudStack Team Build-Depends: debhelper (>= 9), openjdk-17-jdk | java17-sdk | java17-jdk | zulu-17 | openjdk-11-jdk | java11-sdk | java11-jdk | zulu-11, genisoimage, python-mysql.connector | python3-mysql.connector | mysql-connector-python-py3, maven (>= 3) | maven3, python (>= 2.7) | python2 (>= 2.7), python3 (>= 3), python-setuptools, python3-setuptools, diff --git a/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java b/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java index e8ffd86ac4f..94c73d8f4d6 100644 --- a/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java +++ b/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java @@ -22,7 +22,6 @@ import java.util.LinkedHashMap; import 
java.util.List; import java.util.Map; -import com.cloud.exception.ResourceAllocationException; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.ConfigKey; @@ -38,6 +37,7 @@ import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InsufficientServerCapacityException; import com.cloud.exception.OperationTimedoutException; +import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.host.Host; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -101,6 +101,10 @@ public interface VirtualMachineManager extends Manager { "refer documentation", true, ConfigKey.Scope.Zone); + ConfigKey VmSyncPowerStateTransitioning = new ConfigKey<>("Advanced", Boolean.class, "vm.sync.power.state.transitioning", "true", + "Whether to sync power states of the transitioning and stalled VMs while processing VM power reports.", false); + + interface Topics { String VM_POWER_STATE = "vm.powerstate"; } @@ -286,24 +290,22 @@ public interface VirtualMachineManager extends Manager { /** * Obtains statistics for a list of VMs; CPU and network utilization - * @param hostId ID of the host - * @param hostName name of the host + * @param host host * @param vmIds list of VM IDs * @return map of VM ID and stats entry for the VM */ - HashMap getVirtualMachineStatistics(long hostId, String hostName, List vmIds); + HashMap getVirtualMachineStatistics(Host host, List vmIds); /** * Obtains statistics for a list of VMs; CPU and network utilization - * @param hostId ID of the host - * @param hostName name of the host - * @param vmMap map of VM IDs and the corresponding VirtualMachine object + * @param host host + * @param vmMap map of VM instanceName and its ID * @return map of VM ID and stats entry for the VM */ - HashMap getVirtualMachineStatistics(long hostId, String hostName, Map vmMap); + HashMap 
getVirtualMachineStatistics(Host host, Map vmMap); - HashMap> getVmDiskStatistics(long hostId, String hostName, Map vmMap); + HashMap> getVmDiskStatistics(Host host, Map vmInstanceNameIdMap); - HashMap> getVmNetworkStatistics(long hostId, String hostName, Map vmMap); + HashMap> getVmNetworkStatistics(Host host, Map vmInstanceNameIdMap); Map getDiskOfferingSuitabilityForVm(long vmId, List diskOfferingIds); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/NetworkOrchestrationService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/NetworkOrchestrationService.java index d8e97f0277b..8463d9cee98 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/NetworkOrchestrationService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/NetworkOrchestrationService.java @@ -82,6 +82,9 @@ public interface NetworkOrchestrationService { ConfigKey NetworkLockTimeout = new ConfigKey(Integer.class, NetworkLockTimeoutCK, "Network", "600", "Lock wait timeout (seconds) while implementing network", true, Scope.Global, null); + ConfigKey DeniedRoutes = new ConfigKey(String.class, "denied.routes", "Network", "", + "Routes that are denied, can not be used for Static Routes creation for the VPC Private Gateway", true, ConfigKey.Scope.Zone, null); + ConfigKey GuestDomainSuffix = new ConfigKey(String.class, GuestDomainSuffixCK, "Network", "cloud.internal", "Default domain name for vms inside virtualized networks fronted by router", true, ConfigKey.Scope.Zone, null); diff --git a/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java b/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java index cbd137e8682..4c81c7359f2 100644 --- a/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java +++ b/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java @@ -16,14 +16,13 @@ // 
under the License. package com.cloud.capacity; -import java.util.Map; +import java.util.List; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import com.cloud.host.Host; import com.cloud.offering.ServiceOffering; -import com.cloud.service.ServiceOfferingVO; import com.cloud.storage.VMTemplateVO; import com.cloud.utils.Pair; import com.cloud.vm.VirtualMachine; @@ -70,7 +69,7 @@ public interface CapacityManager { "0.85", "Percentage (as a value between 0 and 1) of storage utilization above which allocators will disable using the pool for low storage available.", true, - ConfigKey.Scope.Zone); + List.of(ConfigKey.Scope.StoragePool, ConfigKey.Scope.Zone)); static final ConfigKey StorageOverprovisioningFactor = new ConfigKey<>( "Storage", @@ -88,7 +87,7 @@ public interface CapacityManager { "0.85", "Percentage (as a value between 0 and 1) of allocated storage utilization above which allocators will disable using the pool for low allocated storage available.", true, - ConfigKey.Scope.Zone); + List.of(ConfigKey.Scope.StoragePool, ConfigKey.Scope.Zone)); static final ConfigKey StorageOperationsExcludeCluster = new ConfigKey<>( Boolean.class, @@ -128,7 +127,11 @@ public interface CapacityManager { "Percentage (as a value between 0 and 1) of allocated storage utilization above which allocators will disable using the pool for volume resize. 
" + "This is applicable only when volume.resize.allowed.beyond.allocation is set to true.", true, - ConfigKey.Scope.Zone); + List.of(ConfigKey.Scope.StoragePool, ConfigKey.Scope.Zone)); + + ConfigKey CapacityCalculateWorkers = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, Integer.class, + "capacity.calculate.workers", "1", + "Number of worker threads to be used for capacities calculation", true); public boolean releaseVmCapacity(VirtualMachine vm, boolean moveFromReserved, boolean moveToReservered, Long hostId); @@ -145,8 +148,6 @@ public interface CapacityManager { void updateCapacityForHost(Host host); - void updateCapacityForHost(Host host, Map offeringsMap); - /** * @param pool storage pool * @param templateForVmCreation template that will be used for vm creation @@ -163,12 +164,12 @@ public interface CapacityManager { /** * Check if specified host has capability to support cpu cores and speed freq - * @param hostId the host to be checked + * @param host the host to be checked * @param cpuNum cpu number to check * @param cpuSpeed cpu Speed to check * @return true if the count of host's running VMs >= hypervisor limit */ - boolean checkIfHostHasCpuCapability(long hostId, Integer cpuNum, Integer cpuSpeed); + boolean checkIfHostHasCpuCapability(Host host, Integer cpuNum, Integer cpuSpeed); /** * Check if cluster will cross threshold if the cpu/memory requested are accommodated diff --git a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java index 3db2afb503d..3e65ddf78e2 100755 --- a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java +++ b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java @@ -140,13 +140,13 @@ public interface ResourceManager extends ResourceService, Configurable { public List listAllHostsInOneZoneNotInClusterByHypervisors(List types, long dcId, long clusterId); - public List 
listAvailHypervisorInZone(Long hostId, Long zoneId); + public List listAvailHypervisorInZone(Long zoneId); public HostVO findHostByGuid(String guid); public HostVO findHostByName(String name); - HostStats getHostStatistics(long hostId); + HostStats getHostStatistics(Host host); Long getGuestOSCategoryId(long hostId); diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index 0b9f7bcb7db..46f796b4f78 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -22,6 +22,7 @@ import java.util.Map; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; @@ -42,6 +43,7 @@ import com.cloud.offering.DiskOffering; import com.cloud.offering.ServiceOffering; import com.cloud.storage.Storage.ImageFormat; import com.cloud.utils.Pair; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.DiskProfile; import com.cloud.vm.VMInstanceVO; @@ -212,7 +214,11 @@ public interface StorageManager extends StorageService { ConfigKey AllowVolumeReSizeBeyondAllocation = new ConfigKey("Advanced", Boolean.class, "volume.resize.allowed.beyond.allocation", "false", "Determines whether volume size can exceed the pool capacity allocation disable threshold (pool.storage.allocated.capacity.disablethreshold) " + "when resize a volume upto resize capacity disable threshold (pool.storage.allocated.resize.capacity.disablethreshold)", - true, ConfigKey.Scope.Zone); + true, List.of(ConfigKey.Scope.StoragePool, ConfigKey.Scope.Zone)); + + ConfigKey StoragePoolHostConnectWorkers = new 
ConfigKey<>("Storage", Integer.class, + "storage.pool.host.connect.workers", "1", + "Number of worker threads to be used to connect hosts to a primary storage", true); /** * should we execute in sequence not involving any storages? @@ -365,6 +371,9 @@ public interface StorageManager extends StorageService { String getStoragePoolMountFailureReason(String error); + void connectHostsToPool(DataStore primaryStore, List hostIds, Scope scope, + boolean handleStorageConflictException, boolean errorOnNoUpHost) throws CloudRuntimeException; + boolean connectHostToSharedPool(Host host, long poolId) throws StorageUnavailableException, StorageConflictException; void disconnectHostFromSharedPool(Host host, StoragePool pool) throws StorageUnavailableException, StorageConflictException; diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java index f154eaddc1e..765602e42d0 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java @@ -19,6 +19,7 @@ package com.cloud.agent.manager; import java.io.IOException; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; +import java.net.SocketAddress; import java.nio.channels.ClosedChannelException; import java.util.ArrayList; import java.util.Arrays; @@ -26,25 +27,20 @@ import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import 
java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; +import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.cluster.ManagementServerHostVO; -import com.cloud.cluster.dao.ManagementServerHostDao; -import com.cloud.configuration.Config; -import com.cloud.org.Cluster; -import com.cloud.utils.NumbersUtil; -import com.cloud.utils.db.GlobalLock; import org.apache.cloudstack.agent.lb.IndirectAgentLB; import org.apache.cloudstack.ca.CAManager; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; @@ -62,6 +58,8 @@ import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.commons.collections.MapUtils; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.BooleanUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.logging.log4j.ThreadContext; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -88,6 +86,9 @@ import com.cloud.agent.api.UnsupportedAnswer; import com.cloud.agent.transport.Request; import com.cloud.agent.transport.Response; import com.cloud.alert.AlertManager; +import com.cloud.cluster.ManagementServerHostVO; +import com.cloud.cluster.dao.ManagementServerHostDao; +import com.cloud.configuration.Config; import com.cloud.configuration.ManagementServiceConfiguration; import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenterVO; @@ -107,15 +108,18 @@ import com.cloud.host.Status.Event; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.HypervisorGuruManager; +import com.cloud.org.Cluster; import com.cloud.resource.Discoverer; import com.cloud.resource.ResourceManager; import com.cloud.resource.ResourceState; import com.cloud.resource.ServerResource; +import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import 
com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; import com.cloud.utils.db.EntityManager; +import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.TransactionLegacy; @@ -130,8 +134,6 @@ import com.cloud.utils.nio.Link; import com.cloud.utils.nio.NioServer; import com.cloud.utils.nio.Task; import com.cloud.utils.time.InaccurateClock; -import org.apache.commons.lang3.StringUtils; -import org.apache.logging.log4j.ThreadContext; /** * Implementation of the Agent Manager. This class controls the connection to the agents. @@ -142,14 +144,13 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl * _agents is a ConcurrentHashMap, but it is used from within a synchronized block. This will be reported by findbugs as JLM_JSR166_UTILCONCURRENT_MONITORENTER. Maybe a * ConcurrentHashMap is not the right thing to use here, but i'm not sure so i leave it alone. 
*/ - protected ConcurrentHashMap _agents = new ConcurrentHashMap(10007); - protected List> _hostMonitors = new ArrayList>(17); - protected List> _cmdMonitors = new ArrayList>(17); - protected List> _creationMonitors = new ArrayList>(17); - protected List _loadingAgents = new ArrayList(); + protected ConcurrentHashMap _agents = new ConcurrentHashMap<>(10007); + protected List> _hostMonitors = new ArrayList<>(17); + protected List> _cmdMonitors = new ArrayList<>(17); + protected List> _creationMonitors = new ArrayList<>(17); + protected List _loadingAgents = new ArrayList<>(); protected Map _commandTimeouts = new HashMap<>(); private int _monitorId = 0; - private final Lock _agentStatusLock = new ReentrantLock(); @Inject protected CAManager caService; @@ -201,25 +202,36 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl private List lastAgents = null; protected StateMachine2 _statusStateMachine = Status.getStateMachine(); - private final ConcurrentHashMap _pingMap = new ConcurrentHashMap(10007); + private final ConcurrentHashMap _pingMap = new ConcurrentHashMap<>(10007); + private int maxConcurrentNewAgentConnections; + private final ConcurrentHashMap newAgentConnections = new ConcurrentHashMap<>(); + protected ScheduledExecutorService newAgentConnectionsMonitor; @Inject ResourceManager _resourceMgr; @Inject ManagementServiceConfiguration mgmtServiceConf; - protected final ConfigKey Workers = new ConfigKey("Advanced", Integer.class, "workers", "5", + protected final ConfigKey Workers = new ConfigKey<>("Advanced", Integer.class, "workers", "5", "Number of worker threads handling remote agent connections.", false); - protected final ConfigKey Port = new ConfigKey("Advanced", Integer.class, "port", "8250", "Port to listen on for remote agent connections.", false); - protected final ConfigKey AlertWait = new ConfigKey("Advanced", Integer.class, "alert.wait", "1800", + protected final ConfigKey Port = new ConfigKey<>("Advanced", 
Integer.class, "port", "8250", "Port to listen on for remote agent connections.", false); + protected final ConfigKey RemoteAgentSslHandshakeTimeout = new ConfigKey<>("Advanced", + Integer.class, "agent.ssl.handshake.timeout", "30", + "Seconds after which SSL handshake times out during remote agent connections.", false); + protected final ConfigKey RemoteAgentMaxConcurrentNewConnections = new ConfigKey<>("Advanced", + Integer.class, "agent.max.concurrent.new.connections", "0", + "Number of maximum concurrent new connections server allows for remote agents. " + + "If set to zero (default value) then no limit will be enforced on concurrent new connections", + false); + protected final ConfigKey AlertWait = new ConfigKey<>("Advanced", Integer.class, "alert.wait", "1800", "Seconds to wait before alerting on a disconnected agent", true); - protected final ConfigKey DirectAgentLoadSize = new ConfigKey("Advanced", Integer.class, "direct.agent.load.size", "16", + protected final ConfigKey DirectAgentLoadSize = new ConfigKey<>("Advanced", Integer.class, "direct.agent.load.size", "16", "The number of direct agents to load each time", false); - protected final ConfigKey DirectAgentPoolSize = new ConfigKey("Advanced", Integer.class, "direct.agent.pool.size", "500", + protected final ConfigKey DirectAgentPoolSize = new ConfigKey<>("Advanced", Integer.class, "direct.agent.pool.size", "500", "Default size for DirectAgentPool", false); - protected final ConfigKey DirectAgentThreadCap = new ConfigKey("Advanced", Float.class, "direct.agent.thread.cap", "1", + protected final ConfigKey DirectAgentThreadCap = new ConfigKey<>("Advanced", Float.class, "direct.agent.thread.cap", "1", "Percentage (as a value between 0 and 1) of direct.agent.pool.size to be used as upper thread cap for a single direct agent to process requests", false); - protected final ConfigKey CheckTxnBeforeSending = new ConfigKey("Developer", Boolean.class, "check.txn.before.sending.agent.commands", "false", + 
protected final ConfigKey CheckTxnBeforeSending = new ConfigKey<>("Developer", Boolean.class, "check.txn.before.sending.agent.commands", "false", "This parameter allows developers to enable a check to see if a transaction wraps commands that are sent to the resource. This is not to be enabled on production systems.", true); @Override @@ -227,8 +239,6 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl logger.info("Ping Timeout is {}.", mgmtServiceConf.getPingTimeout()); - final int threads = DirectAgentLoadSize.value(); - _nodeId = ManagementServerNode.getManagementServerId(); logger.info("Configuring AgentManagerImpl. management server node id(msid): {}.", _nodeId); @@ -241,24 +251,32 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl managementServerMaintenanceManager.registerListener(this); - _executor = new ThreadPoolExecutor(threads, threads, 60l, TimeUnit.SECONDS, new LinkedBlockingQueue(), new NamedThreadFactory("AgentTaskPool")); + final int agentTaskThreads = DirectAgentLoadSize.value(); - _connectExecutor = new ThreadPoolExecutor(100, 500, 60l, TimeUnit.SECONDS, new LinkedBlockingQueue(), new NamedThreadFactory("AgentConnectTaskPool")); + _executor = new ThreadPoolExecutor(agentTaskThreads, agentTaskThreads, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), new NamedThreadFactory("AgentTaskPool")); + + _connectExecutor = new ThreadPoolExecutor(100, 500, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), new NamedThreadFactory("AgentConnectTaskPool")); // allow core threads to time out even when there are no items in the queue _connectExecutor.allowCoreThreadTimeOut(true); - _connection = new NioServer("AgentManager", Port.value(), Workers.value() + 10, this, caService); + maxConcurrentNewAgentConnections = RemoteAgentMaxConcurrentNewConnections.value(); + + _connection = new NioServer("AgentManager", Port.value(), Workers.value() + 10, + this, caService, 
RemoteAgentSslHandshakeTimeout.value()); logger.info("Listening on {} with {} workers.", Port.value(), Workers.value()); + final int directAgentPoolSize = DirectAgentPoolSize.value(); // executes all agent commands other than cron and ping - _directAgentExecutor = new ScheduledThreadPoolExecutor(DirectAgentPoolSize.value(), new NamedThreadFactory("DirectAgent")); + _directAgentExecutor = new ScheduledThreadPoolExecutor(directAgentPoolSize, new NamedThreadFactory("DirectAgent")); // executes cron and ping agent commands - _cronJobExecutor = new ScheduledThreadPoolExecutor(DirectAgentPoolSize.value(), new NamedThreadFactory("DirectAgentCronJob")); - logger.debug("Created DirectAgentAttache pool with size: {}.", DirectAgentPoolSize.value()); - _directAgentThreadCap = Math.round(DirectAgentPoolSize.value() * DirectAgentThreadCap.value()) + 1; // add 1 to always make the value > 0 + _cronJobExecutor = new ScheduledThreadPoolExecutor(directAgentPoolSize, new NamedThreadFactory("DirectAgentCronJob")); + logger.debug("Created DirectAgentAttache pool with size: {}.", directAgentPoolSize); + _directAgentThreadCap = Math.round(directAgentPoolSize * DirectAgentThreadCap.value()) + 1; // add 1 to always make the value > 0 _monitorExecutor = new ScheduledThreadPoolExecutor(1, new NamedThreadFactory("AgentMonitor")); + newAgentConnectionsMonitor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("NewAgentConnectionsMonitor")); + initializeCommandTimeouts(); return true; @@ -269,22 +287,44 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl return new AgentHandler(type, link, data); } + @Override + public int getMaxConcurrentNewConnectionsCount() { + return maxConcurrentNewAgentConnections; + } + + @Override + public int getNewConnectionsCount() { + return newAgentConnections.size(); + } + + @Override + public void registerNewConnection(SocketAddress address) { + logger.trace("Adding new agent connection from {}", address.toString()); + 
newAgentConnections.putIfAbsent(address.toString(), System.currentTimeMillis()); + } + + @Override + public void unregisterNewConnection(SocketAddress address) { + logger.trace("Removing new agent connection for {}", address.toString()); + newAgentConnections.remove(address.toString()); + } + @Override public int registerForHostEvents(final Listener listener, final boolean connections, final boolean commands, final boolean priority) { synchronized (_hostMonitors) { _monitorId++; if (connections) { if (priority) { - _hostMonitors.add(0, new Pair(_monitorId, listener)); + _hostMonitors.add(0, new Pair<>(_monitorId, listener)); } else { - _hostMonitors.add(new Pair(_monitorId, listener)); + _hostMonitors.add(new Pair<>(_monitorId, listener)); } } if (commands) { if (priority) { - _cmdMonitors.add(0, new Pair(_monitorId, listener)); + _cmdMonitors.add(0, new Pair<>(_monitorId, listener)); } else { - _cmdMonitors.add(new Pair(_monitorId, listener)); + _cmdMonitors.add(new Pair<>(_monitorId, listener)); } } logger.debug("Registering listener {} with id {}", listener.getClass().getSimpleName(), _monitorId); @@ -297,9 +337,9 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl synchronized (_hostMonitors) { _monitorId++; if (priority) { - _creationMonitors.add(0, new Pair(_monitorId, creator)); + _creationMonitors.add(0, new Pair<>(_monitorId, creator)); } else { - _creationMonitors.add(new Pair(_monitorId, creator)); + _creationMonitors.add(new Pair<>(_monitorId, creator)); } return _monitorId; } @@ -331,7 +371,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl public void onManagementServerCancelMaintenance() { logger.debug("Management server maintenance disabled"); if (_connectExecutor.isShutdown()) { - _connectExecutor = new ThreadPoolExecutor(100, 500, 60l, TimeUnit.SECONDS, new LinkedBlockingQueue(), new NamedThreadFactory("AgentConnectTaskPool")); + _connectExecutor = new ThreadPoolExecutor(100, 500, 
60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), new NamedThreadFactory("AgentConnectTaskPool")); _connectExecutor.allowCoreThreadTimeOut(true); } @@ -351,7 +391,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } private AgentControlAnswer handleControlCommand(final AgentAttache attache, final AgentControlCommand cmd) { - AgentControlAnswer answer = null; + AgentControlAnswer answer; for (final Pair listener : _cmdMonitors) { answer = listener.second().processControlCommand(attache.getId(), cmd); @@ -379,7 +419,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } public AgentAttache findAttache(final long hostId) { - AgentAttache attache = null; + AgentAttache attache; synchronized (_agents) { attache = _agents.get(hostId); } @@ -431,12 +471,10 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl cmds.addCommand(cmd); send(hostId, cmds, cmd.getWait()); final Answer[] answers = cmds.getAnswers(); - if (answers != null && !(answers[0] instanceof UnsupportedAnswer)) { - return answers[0]; - } - - if (answers != null && answers[0] instanceof UnsupportedAnswer) { - logger.warn("Unsupported Command: {}", answers[0].getDetails()); + if (answers != null) { + if (answers[0] instanceof UnsupportedAnswer) { + logger.warn("Unsupported Command: {}", answers[0].getDetails()); + } return answers[0]; } @@ -467,8 +505,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } /** - * @param commands - * @return + * @param commands object container of commands + * @return array of commands */ private Command[] checkForCommandsAndTag(final Commands commands) { final Command[] cmds = commands.toCommands(); @@ -484,8 +522,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } /** - * @param commands - * @param cmds + * @param commands object container of commands + * @param cmds array of commands */ private void 
setEmptyAnswers(final Commands commands, final Command[] cmds) { if (cmds.length == 0) { @@ -524,7 +562,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl String commandWaits = GranularWaitTimeForCommands.value().trim(); if (StringUtils.isNotEmpty(commandWaits)) { _commandTimeouts = getCommandTimeoutsMap(commandWaits); - logger.info(String.format("Timeouts for management server internal commands successfully initialized from global setting commands.timeout: %s", _commandTimeouts)); + logger.info("Timeouts for management server internal commands successfully initialized from global setting commands.timeout: {}", _commandTimeouts); } } @@ -540,10 +578,10 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl int commandTimeout = Integer.parseInt(parts[1].trim()); commandTimeouts.put(commandName, commandTimeout); } catch (NumberFormatException e) { - logger.error(String.format("Initialising the timeouts using commands.timeout: %s for management server internal commands failed with error %s", commandPair, e.getMessage())); + logger.error("Initialising the timeouts using commands.timeout: {} for management server internal commands failed with error {}", commandPair, e.getMessage()); } } else { - logger.error(String.format("Error initialising the timeouts for management server internal commands. Invalid format in commands.timeout: %s", commandPair)); + logger.error("Error initialising the timeouts for management server internal commands. 
Invalid format in commands.timeout: {}", commandPair); } } return commandTimeouts; @@ -557,7 +595,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } int wait = getTimeout(commands, timeout); - logger.debug(String.format("Wait time setting on %s is %d seconds", commands, wait)); + logger.debug("Wait time setting on {} is {} seconds", commands, wait); for (Command cmd : commands) { String simpleCommandName = cmd.getClass().getSimpleName(); Integer commandTimeout = _commandTimeouts.get(simpleCommandName); @@ -644,7 +682,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } final long hostId = attache.getId(); logger.debug("Remove Agent : {}", attache); - AgentAttache removed = null; + AgentAttache removed; boolean conflict = false; synchronized (_agents) { removed = _agents.remove(hostId); @@ -697,16 +735,15 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } catch (final HypervisorVersionChangedException hvce) { handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true); - throw new CloudRuntimeException("Unable to connect " + (attache == null ? "" : attache.getId()), hvce); + throw new CloudRuntimeException("Unable to connect " + attache.getId(), hvce); } catch (final Exception e) { logger.error("Monitor {} says there is an error in the connect process for {} due to {}", monitor.second().getClass().getSimpleName(), hostId, e.getMessage(), e); handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); - throw new CloudRuntimeException("Unable to connect " + (attache == null ? 
"" : attache.getId()), e); + throw new CloudRuntimeException("Unable to connect " + attache.getId(), e); } } } - final Long dcId = host.getDataCenterId(); final ReadyCommand ready = new ReadyCommand(host, NumbersUtil.enableHumanReadableSizes); ready.setWait(ReadyCommandWait.value()); final Answer answer = easySend(hostId, ready); @@ -757,6 +794,10 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl _monitorExecutor.scheduleWithFixedDelay(new MonitorTask(), mgmtServiceConf.getPingInterval(), mgmtServiceConf.getPingInterval(), TimeUnit.SECONDS); + final int cleanupTime = Wait.value(); + newAgentConnectionsMonitor.scheduleAtFixedRate(new AgentNewConnectionsMonitorTask(), cleanupTime, + cleanupTime, TimeUnit.MINUTES); + return true; } @@ -775,25 +816,25 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl final Constructor constructor = clazz.getConstructor(); resource = (ServerResource)constructor.newInstance(); } catch (final ClassNotFoundException e) { - logger.warn("Unable to find class " + host.getResource(), e); + logger.warn("Unable to find class {}", host.getResource(), e); } catch (final InstantiationException e) { - logger.warn("Unable to instantiate class " + host.getResource(), e); + logger.warn("Unable to instantiate class {}", host.getResource(), e); } catch (final IllegalAccessException e) { - logger.warn("Illegal access " + host.getResource(), e); + logger.warn("Illegal access {}", host.getResource(), e); } catch (final SecurityException e) { - logger.warn("Security error on " + host.getResource(), e); + logger.warn("Security error on {}", host.getResource(), e); } catch (final NoSuchMethodException e) { - logger.warn("NoSuchMethodException error on " + host.getResource(), e); + logger.warn("NoSuchMethodException error on {}", host.getResource(), e); } catch (final IllegalArgumentException e) { - logger.warn("IllegalArgumentException error on " + host.getResource(), e); + 
logger.warn("IllegalArgumentException error on {}", host.getResource(), e); } catch (final InvocationTargetException e) { - logger.warn("InvocationTargetException error on " + host.getResource(), e); + logger.warn("InvocationTargetException error on {}", host.getResource(), e); } if (resource != null) { _hostDao.loadDetails(host); - final HashMap params = new HashMap(host.getDetails().size() + 5); + final HashMap params = new HashMap<>(host.getDetails().size() + 5); params.putAll(host.getDetails()); params.put("guid", host.getGuid()); @@ -803,7 +844,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } if (host.getClusterId() != null) { params.put("cluster", Long.toString(host.getClusterId())); - String guid = null; + String guid; final ClusterVO cluster = _clusterDao.findById(host.getClusterId()); if (cluster.getGuid() == null) { guid = host.getDetail("pool"); @@ -843,7 +884,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl protected boolean loadDirectlyConnectedHost(final HostVO host, final boolean forRebalance, final boolean isTransferredConnection) { boolean initialized = false; - ServerResource resource = null; + ServerResource resource; try { // load the respective discoverer final Discoverer discoverer = _resourceMgr.getMatchingDiscover(host.getHypervisorType()); @@ -873,18 +914,18 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl final Host h = _resourceMgr.createHostAndAgent(host.getId(), resource, host.getDetails(), false, null, true, isTransferredConnection); tapLoadingAgents(host.getId(), TapAgentsAction.Del); - return h == null ? 
false : true; + return h != null; } else { _executor.execute(new SimulateStartTask(host.getId(), host.getUuid(), host.getName(), resource, host.getDetails())); return true; } } - protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) throws ConnectionException { + protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) { logger.debug("create DirectAgentAttache for {}", host); final DirectAgentAttache attache = new DirectAgentAttache(this, host.getId(), host.getUuid(), host.getName(), resource, host.isInMaintenanceStates()); - AgentAttache old = null; + AgentAttache old; synchronized (_agents) { old = _agents.put(host.getId(), attache); } @@ -918,6 +959,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl _connectExecutor.shutdownNow(); _monitorExecutor.shutdownNow(); + newAgentConnectionsMonitor.shutdownNow(); return true; } @@ -949,7 +991,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl try { logger.info("Host {} is disconnecting with event {}", attache, event); - Status nextStatus = null; + Status nextStatus; final HostVO host = _hostDao.findById(hostId); if (host == null) { logger.warn("Can't find host with {} ({})", hostId, attache); @@ -1082,7 +1124,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override protected void runInContext() { try { - if (_investigate == true) { + if (_investigate) { handleDisconnectWithInvestigation(_attache, _event); } else { handleDisconnectWithoutInvestigation(_attache, _event, true, false); @@ -1134,8 +1176,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl public Answer[] send(final Long hostId, final Commands cmds) throws AgentUnavailableException, OperationTimedoutException { int wait = 0; if (cmds.size() > 1) { - logger.debug(String.format("Checking the wait time in seconds to be used for the 
following commands : %s. If there are multiple commands sent at once," + - "then max wait time of those will be used", cmds)); + logger.debug("Checking the wait time in seconds to be used for the following commands : {}. If there are multiple commands sent at once," + + "then max wait time of those will be used", cmds); } for (final Command cmd : cmds) { @@ -1198,7 +1240,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException { if (event == Event.AgentDisconnected) { - AgentAttache attache = null; + AgentAttache attache; attache = findAttache(hostId); logger.debug("Received agent disconnect event for host {} ({})", hostId, attache); if (attache != null) { @@ -1224,12 +1266,12 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl return agentAttache != null; } - protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) throws ConnectionException { + protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) { logger.debug("create ConnectedAgentAttache for {}", host); final AgentAttache attache = new ConnectedAgentAttache(this, host.getId(), host.getUuid(), host.getName(), link, host.isInMaintenanceStates()); link.attach(attache); - AgentAttache old = null; + AgentAttache old; synchronized (_agents) { old = _agents.put(host.getId(), attache); } @@ -1254,7 +1296,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } ready.setArch(host.getArch().getType()); - AgentAttache attache = null; + AgentAttache attache; GlobalLock joinLock = getHostJoinLock(host.getId()); if (joinLock.lock(60)) { try { @@ -1280,7 +1322,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl return attache; } - private AgentAttache handleConnectedAgent(final Link link, final StartupCommand[] startup, final Request 
request) { + private AgentAttache handleConnectedAgent(final Link link, final StartupCommand[] startup) { AgentAttache attache = null; ReadyCommand ready = null; try { @@ -1308,7 +1350,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl easySend(attache.getId(), ready); } } catch (final Exception e) { - logger.debug("Failed to send ready command:" + e.toString()); + logger.debug("Failed to send ready command:", e); } return attache; } @@ -1334,6 +1376,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl this.id = id; this.resource = resource; this.details = details; + this.uuid = uuid; + this.name = name; } @Override @@ -1382,10 +1426,11 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl startups[i] = (StartupCommand)_cmds[i]; } - final AgentAttache attache = handleConnectedAgent(_link, startups, _request); + final AgentAttache attache = handleConnectedAgent(_link, startups); if (attache == null) { logger.warn("Unable to create attache for agent: {}", _request); } + unregisterNewConnection(_link.getSocketAddress()); } } @@ -1402,7 +1447,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl break; } } - Response response = null; + Response response; response = new Response(request, answers[0], _nodeId, -1); try { link.send(response.toBytes()); @@ -1483,7 +1528,6 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } final long hostId = attache.getId(); - final String hostName = attache.getName(); if (logger.isDebugEnabled()) { if (cmd instanceof PingRoutingCommand) { @@ -1502,7 +1546,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl final Answer[] answers = new Answer[cmds.length]; for (int i = 0; i < cmds.length; i++) { cmd = cmds[i]; - Answer answer = null; + Answer answer; try { if (cmd instanceof StartupRoutingCommand) { final StartupRoutingCommand startup = 
(StartupRoutingCommand) cmd; @@ -1536,7 +1580,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl final long cmdHostId = ((PingCommand)cmd).getHostId(); boolean requestStartupCommand = false; - final HostVO host = _hostDao.findById(Long.valueOf(cmdHostId)); + final HostVO host = _hostDao.findById(cmdHostId); boolean gatewayAccessible = true; // if the router is sending a ping, verify the // gateway was pingable @@ -1586,7 +1630,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl if (logD) { logger.debug("SeqA {}-: Sending {}", attache.getId(), response.getSequence(), response); } else { - logger.trace("SeqA {}-: Sending {}" + attache.getId(), response.getSequence(), response); + logger.trace("SeqA {}-: Sending {} {}", response.getSequence(), response, attache.getId()); } try { link.send(response.toBytes()); @@ -1606,15 +1650,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override protected void doTask(final Task task) throws TaskExecutionException { - final TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); - try { + try (TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB)) { final Type type = task.getType(); - if (type == Task.Type.DATA) { + if (type == Type.DATA) { final byte[] data = task.getData(); try { final Request event = Request.parse(data); if (event instanceof Response) { - processResponse(task.getLink(), (Response)event); + processResponse(task.getLink(), (Response) event); } else { processRequest(task.getLink(), event); } @@ -1626,10 +1669,10 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl logger.error(message); throw new TaskExecutionException(message, e); } - } else if (type == Task.Type.CONNECT) { - } else if (type == Task.Type.DISCONNECT) { + } else if (type == Type.CONNECT) { + } else if (type == Type.DISCONNECT) { final Link link = task.getLink(); - final 
AgentAttache attache = (AgentAttache)link.attachment(); + final AgentAttache attache = (AgentAttache) link.attachment(); if (attache != null) { disconnectWithInvestigation(attache, Event.AgentDisconnected); } else { @@ -1638,8 +1681,6 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl link.terminated(); } } - } finally { - txn.close(); } } } @@ -1668,21 +1709,16 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override public boolean agentStatusTransitTo(final HostVO host, final Status.Event e, final long msId) { - try { - _agentStatusLock.lock(); - logger.debug("[Resource state = {}, Agent event = , Host = {}]", - host.getResourceState(), e.toString(), host); + logger.debug("[Resource state = {}, Agent event = , Host = {}]", + host.getResourceState(), e.toString(), host); - host.setManagementServerId(msId); - try { - return _statusStateMachine.transitTo(host, e, host.getId(), _hostDao); - } catch (final NoTransitionException e1) { - logger.debug("Cannot transit agent status with event {} for host {}, management server id is {}", e, host, msId); - throw new CloudRuntimeException(String.format( - "Cannot transit agent status with event %s for host %s, management server id is %d, %s", e, host, msId, e1.getMessage())); - } - } finally { - _agentStatusLock.unlock(); + host.setManagementServerId(msId); + try { + return _statusStateMachine.transitTo(host, e, host.getId(), _hostDao); + } catch (final NoTransitionException e1) { + logger.debug("Cannot transit agent status with event {} for host {}, management server id is {}", e, host, msId); + throw new CloudRuntimeException(String.format( + "Cannot transit agent status with event %s for host %s, management server id is %d, %s", e, host, msId, e1.getMessage())); } } @@ -1871,7 +1907,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } protected List findAgentsBehindOnPing() { - final List agentsBehind = new ArrayList(); 
+ final List agentsBehind = new ArrayList<>(); final long cutoffTime = InaccurateClock.getTimeInSeconds() - mgmtServiceConf.getTimeout(); for (final Map.Entry entry : _pingMap.entrySet()) { if (entry.getValue() < cutoffTime) { @@ -1879,7 +1915,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } - if (agentsBehind.size() > 0) { + if (!agentsBehind.isEmpty()) { logger.info("Found the following agents behind on ping: {}", agentsBehind); } @@ -1887,6 +1923,35 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } + protected class AgentNewConnectionsMonitorTask extends ManagedContextRunnable { + @Override + protected void runInContext() { + logger.trace("Agent New Connections Monitor is started."); + final int cleanupTime = Wait.value(); + Set> entrySet = newAgentConnections.entrySet(); + long cutOff = System.currentTimeMillis() - (cleanupTime * 60 * 1000L); + if (logger.isDebugEnabled()) { + List expiredConnections = newAgentConnections.entrySet() + .stream() + .filter(e -> e.getValue() <= cutOff) + .map(Map.Entry::getKey) + .collect(Collectors.toList()); + logger.debug("Currently {} active new connections, of which {} have expired - {}", + entrySet.size(), + expiredConnections.size(), + StringUtils.join(expiredConnections)); + } + for (Map.Entry entry : entrySet) { + if (entry.getValue() <= cutOff) { + if (logger.isTraceEnabled()) { + logger.trace("Cleaning up new agent connection for {}", entry.getKey()); + } + newAgentConnections.remove(entry.getKey()); + } + } + } + } + protected class BehindOnPingListener implements Listener { @Override public boolean isRecurring() { @@ -1962,7 +2027,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override public ConfigKey[] getConfigKeys() { return new ConfigKey[] { CheckTxnBeforeSending, Workers, Port, Wait, AlertWait, DirectAgentLoadSize, - DirectAgentPoolSize, DirectAgentThreadCap, EnableKVMAutoEnableDisable, 
ReadyCommandWait, GranularWaitTimeForCommands }; + DirectAgentPoolSize, DirectAgentThreadCap, EnableKVMAutoEnableDisable, ReadyCommandWait, + GranularWaitTimeForCommands, RemoteAgentSslHandshakeTimeout, RemoteAgentMaxConcurrentNewConnections }; } protected class SetHostParamsListener implements Listener { @@ -1997,7 +2063,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } if (((StartupRoutingCommand)cmd).getHypervisorType() == HypervisorType.KVM || ((StartupRoutingCommand)cmd).getHypervisorType() == HypervisorType.LXC) { - Map params = new HashMap(); + Map params = new HashMap<>(); params.put(Config.RouterAggregationCommandEachTimeout.toString(), _configDao.getValue(Config.RouterAggregationCommandEachTimeout.toString())); params.put(Config.MigrateWait.toString(), _configDao.getValue(Config.MigrateWait.toString())); params.put(NetworkOrchestrationService.TUNGSTEN_ENABLED.key(), String.valueOf(NetworkOrchestrationService.TUNGSTEN_ENABLED.valueIn(host.getDataCenterId()))); @@ -2042,13 +2108,13 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl if (allHosts == null) { return null; } - Map> hostsByZone = new HashMap>(); + Map> hostsByZone = new HashMap<>(); for (HostVO host : allHosts) { if (host.getHypervisorType() == HypervisorType.KVM || host.getHypervisorType() == HypervisorType.LXC) { Long zoneId = host.getDataCenterId(); List hostIds = hostsByZone.get(zoneId); if (hostIds == null) { - hostIds = new ArrayList(); + hostIds = new ArrayList<>(); } hostIds.add(host.getId()); hostsByZone.put(zoneId, hostIds); diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java index 732ce9d61f5..c667df5412e 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java +++ 
b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java @@ -56,6 +56,7 @@ import org.apache.cloudstack.maintenance.command.PrepareForShutdownManagementSer import org.apache.cloudstack.maintenance.command.TriggerShutdownManagementServerHostCommand; import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.managed.context.ManagedContextTimerTask; +import org.apache.cloudstack.management.ManagementServerHost; import org.apache.cloudstack.outofbandmanagement.dao.OutOfBandManagementDao; import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.cloudstack.utils.security.SSLUtils; @@ -75,9 +76,6 @@ import com.cloud.cluster.ClusterManager; import com.cloud.cluster.ClusterManagerListener; import com.cloud.cluster.ClusterServicePdu; import com.cloud.cluster.ClusteredAgentRebalanceService; -import org.apache.cloudstack.management.ManagementServerHost; -import org.apache.commons.collections.CollectionUtils; - import com.cloud.cluster.ManagementServerHostVO; import com.cloud.cluster.agentlb.AgentLoadBalancerPlanner; import com.cloud.cluster.agentlb.HostTransferMapVO; @@ -107,6 +105,8 @@ import com.cloud.utils.nio.Link; import com.cloud.utils.nio.Task; import com.google.gson.Gson; +import org.apache.commons.collections.CollectionUtils; + public class ClusteredAgentManagerImpl extends AgentManagerImpl implements ClusterManagerListener, ClusteredAgentRebalanceService { private static ScheduledExecutorService s_transferExecutor = Executors.newScheduledThreadPool(2, new NamedThreadFactory("Cluster-AgentRebalancingExecutor")); private final long rebalanceTimeOut = 300000; // 5 mins - after this time remove the agent from the transfer list @@ -114,7 +114,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust public final static long STARTUP_DELAY = 5000; public final static long SCAN_INTERVAL = 90000; // 90 seconds, it takes 60 sec for xenserver to fail 
login public final static int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 5; // 5 seconds - protected Set _agentToTransferIds = new HashSet(); + protected Set _agentToTransferIds = new HashSet<>(); Gson _gson; protected HashMap _peers; protected HashMap _sslEngines; @@ -151,17 +151,17 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust super(); } - protected final ConfigKey EnableLB = new ConfigKey(Boolean.class, "agent.lb.enabled", "Advanced", "false", "Enable agent load balancing between management server nodes", true); - protected final ConfigKey ConnectedAgentThreshold = new ConfigKey(Double.class, "agent.load.threshold", "Advanced", "0.7", + protected final ConfigKey EnableLB = new ConfigKey<>(Boolean.class, "agent.lb.enabled", "Advanced", "false", "Enable agent load balancing between management server nodes", true); + protected final ConfigKey ConnectedAgentThreshold = new ConfigKey<>(Double.class, "agent.load.threshold", "Advanced", "0.7", "What percentage of the agents can be held by one management server before load balancing happens", true, EnableLB.key()); - protected final ConfigKey LoadSize = new ConfigKey(Integer.class, "direct.agent.load.size", "Advanced", "16", "How many agents to connect to in each round", true); - protected final ConfigKey ScanInterval = new ConfigKey(Integer.class, "direct.agent.scan.interval", "Advanced", "90", "Interval between scans to load agents", false, + protected final ConfigKey LoadSize = new ConfigKey<>(Integer.class, "direct.agent.load.size", "Advanced", "16", "How many agents to connect to in each round", true); + protected final ConfigKey ScanInterval = new ConfigKey<>(Integer.class, "direct.agent.scan.interval", "Advanced", "90", "Interval between scans to load agents", false, ConfigKey.Scope.Global, 1000); @Override public boolean configure(final String name, final Map xmlParams) throws ConfigurationException { - _peers = new HashMap(7); - _sslEngines = new HashMap(7); + _peers = 
new HashMap<>(7); + _sslEngines = new HashMap<>(7); _nodeId = ManagementServerNode.getManagementServerId(); logger.info("Configuring ClusterAgentManagerImpl. management server node id(msid): {}", _nodeId); @@ -220,7 +220,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust if (hosts != null) { hosts.addAll(appliances); - if (hosts.size() > 0) { + if (!hosts.isEmpty()) { logger.debug("Found {} unmanaged direct hosts, processing connect for them...", hosts.size()); for (final HostVO host : hosts) { try { @@ -234,12 +234,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust continue; } } - - logger.debug("Loading directly connected host {}", host); + logger.debug("Loading directly connected {}", host); loadDirectlyConnectedHost(host, false); } catch (final Throwable e) { - logger.warn(" can not load directly connected host {}({}) due to ", - host, e); + logger.warn(" can not load directly connected {} due to ", host, e); } } } @@ -267,10 +265,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust logger.debug("create forwarding ClusteredAgentAttache for {}", host); long id = host.getId(); final AgentAttache attache = new ClusteredAgentAttache(this, id, host.getUuid(), host.getName()); - AgentAttache old = null; + AgentAttache old; synchronized (_agents) { - old = _agents.get(id); - _agents.put(id, attache); + old = _agents.get(host.getId()); + _agents.put(host.getId(), attache); } if (old != null) { logger.debug("Remove stale agent attache from current management server"); @@ -284,7 +282,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust logger.debug("create ClusteredAgentAttache for {}", host); final AgentAttache attache = new ClusteredAgentAttache(this, host.getId(), host.getUuid(), host.getName(), link, host.isInMaintenanceStates()); link.attach(attache); - AgentAttache old = null; + AgentAttache old; synchronized (_agents) { old = 
_agents.get(host.getId()); _agents.put(host.getId(), attache); @@ -299,7 +297,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) { logger.debug("Create ClusteredDirectAgentAttache for {}.", host); final DirectAgentAttache attache = new ClusteredDirectAgentAttache(this, host.getId(), host.getUuid(), host.getName(), _nodeId, resource, host.isInMaintenanceStates()); - AgentAttache old = null; + AgentAttache old; synchronized (_agents) { old = _agents.get(host.getId()); _agents.put(host.getId(), attache); @@ -418,12 +416,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust public boolean routeToPeer(final String peer, final byte[] bytes) { int i = 0; SocketChannel ch = null; - SSLEngine sslEngine = null; + SSLEngine sslEngine; while (i++ < 5) { ch = connectToPeer(peer, ch); if (ch == null) { try { - logD(bytes, "Unable to route to peer: " + Request.parse(bytes).toString()); + logD(bytes, "Unable to route to peer: " + Request.parse(bytes)); } catch (ClassNotFoundException | UnsupportedVersionException e) { // Request.parse thrown exception when we try to log it, log as much as we can logD(bytes, "Unable to route to peer, and Request.parse further caught exception" + e.getMessage()); @@ -441,7 +439,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust return true; } catch (final IOException e) { try { - logI(bytes, "Unable to route to peer: " + Request.parse(bytes).toString() + " due to " + e.getMessage()); + logI(bytes, "Unable to route to peer: " + Request.parse(bytes) + " due to " + e.getMessage()); } catch (ClassNotFoundException | UnsupportedVersionException ex) { // Request.parse thrown exception when we try to log it, log as much as we can logI(bytes, "Unable to route to peer due to" + e.getMessage() + ". 
Also caught exception when parsing request: " + ex.getMessage()); @@ -484,7 +482,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust public SocketChannel connectToPeer(final String peerName, final SocketChannel prevCh) { synchronized (_peers) { final SocketChannel ch = _peers.get(peerName); - SSLEngine sslEngine = null; + SSLEngine sslEngine; if (prevCh != null) { try { prevCh.close(); @@ -569,13 +567,13 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust AgentAttache agent = findAttache(hostId); if (agent == null || !agent.forForward()) { if (isHostOwnerSwitched(host)) { - logger.debug("Host {} has switched to another management server, need to update agent map with a forwarding agent attache", host); + logger.debug("{} has switched to another management server, need to update agent map with a forwarding agent attache", host); agent = createAttache(host); } } if (agent == null) { final AgentUnavailableException ex = new AgentUnavailableException("Host with specified id is not in the right state: " + host.getStatus(), hostId); - ex.addProxyObject(_entityMgr.findById(Host.class, hostId).getUuid()); + ex.addProxyObject(host.getUuid()); throw ex; } @@ -617,9 +615,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override protected void doTask(final Task task) throws TaskExecutionException { - final TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); - try { - if (task.getType() != Task.Type.DATA) { + try (TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB)) { + if (task.getType() != Type.DATA) { super.doTask(task); return; } @@ -646,7 +643,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } final Request req = Request.parse(data); final Command[] cmds = req.getCommands(); - final CancelCommand cancel = (CancelCommand)cmds[0]; + final CancelCommand cancel = (CancelCommand) cmds[0]; 
logD(data, "Cancel request received"); agent.cancel(cancel.getSequence()); final Long current = agent._currentSequence; @@ -670,10 +667,9 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust // to deserialize this and send it through the agent attache. final Request req = Request.parse(data); agent.send(req, null); - return; } else { if (agent instanceof Routable) { - final Routable cluster = (Routable)agent; + final Routable cluster = (Routable) agent; cluster.routeToAgent(data); } else { agent.send(Request.parse(data)); @@ -690,13 +686,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust if (mgmtId != -1 && mgmtId != _nodeId) { routeToPeer(Long.toString(mgmtId), data); if (Request.requiresSequentialExecution(data)) { - final AgentAttache attache = (AgentAttache)link.attachment(); + final AgentAttache attache = (AgentAttache) link.attachment(); if (attache != null) { attache.sendNext(Request.getSequence(data)); } - logD(data, "No attache to process " + Request.parse(data).toString()); + logD(data, "No attache to process " + Request.parse(data)); } - return; } else { if (Request.isRequest(data)) { super.doTask(task); @@ -712,7 +707,6 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust logger.info("SeqA {}-{}: Response is not processed: {}", attache.getId(), response.getSequence(), response.toString()); } } - return; } } } catch (final ClassNotFoundException e) { @@ -723,8 +717,6 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust final String message = String.format("UnsupportedVersionException occurred when executing tasks! 
Error '%s'", e.getMessage()); logger.error(message); throw new TaskExecutionException(message, e); - } finally { - txn.close(); } } } @@ -768,7 +760,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust public boolean executeRebalanceRequest(final long agentId, final long currentOwnerId, final long futureOwnerId, final Event event, boolean isConnectionTransfer) throws AgentUnavailableException, OperationTimedoutException { boolean result = false; if (event == Event.RequestAgentRebalance) { - return setToWaitForRebalance(agentId, currentOwnerId, futureOwnerId); + return setToWaitForRebalance(agentId); } else if (event == Event.StartAgentRebalance) { try { result = rebalanceHost(agentId, currentOwnerId, futureOwnerId, isConnectionTransfer); @@ -823,7 +815,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing); final List allManagedAgents = sc.list(); - int avLoad = 0; + int avLoad; if (!allManagedAgents.isEmpty() && !allMS.isEmpty()) { avLoad = allManagedAgents.size() / allMS.size(); @@ -841,7 +833,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust for (final ManagementServerHostVO node : allMS) { if (node.getMsid() != _nodeId) { - List hostsToRebalance = new ArrayList(); + List hostsToRebalance = new ArrayList<>(); for (final AgentLoadBalancerPlanner lbPlanner : _lbPlanners) { hostsToRebalance = lbPlanner.getHostsToRebalance(node, avLoad); if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) { @@ -867,7 +859,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust HostTransferMapVO transfer = null; try { transfer = _hostTransferDao.startAgentTransfering(hostId, node.getMsid(), _nodeId); - final Answer[] answer = sendRebalanceCommand(node.getMsid(), hostId, node.getMsid(), _nodeId, Event.RequestAgentRebalance); + final Answer[] answer = sendRebalanceCommand(node.getMsid(), 
hostId, node.getMsid(), _nodeId); if (answer == null) { logger.warn("Failed to get host {} from management server {}", host, node); result = false; @@ -894,8 +886,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } } - private Answer[] sendRebalanceCommand(final long peer, final long agentId, final long currentOwnerId, final long futureOwnerId, final Event event) { - return sendRebalanceCommand(peer, agentId, currentOwnerId, futureOwnerId, event, false); + private Answer[] sendRebalanceCommand(final long peer, final long agentId, final long currentOwnerId, final long futureOwnerId) { + return sendRebalanceCommand(peer, agentId, currentOwnerId, futureOwnerId, Event.RequestAgentRebalance, false); } private Answer[] sendRebalanceCommand(final long peer, final long agentId, final long currentOwnerId, final long futureOwnerId, final Event event, final boolean isConnectionTransfer) { @@ -910,8 +902,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust final String peerName = Long.toString(peer); final String cmdStr = _gson.toJson(cmds); final String ansStr = _clusterMgr.execute(peerName, agentId, cmdStr, true); - final Answer[] answers = _gson.fromJson(ansStr, Answer[].class); - return answers; + return _gson.fromJson(ansStr, Answer[].class); } catch (final Exception e) { logger.warn("Caught exception while talking to {}", currentOwnerId, e); return null; @@ -960,7 +951,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust try { logger.trace("Clustered agent transfer scan check, management server id: {}", _nodeId); synchronized (_agentToTransferIds) { - if (_agentToTransferIds.size() > 0) { + if (!_agentToTransferIds.isEmpty()) { logger.debug("Found {} agents to transfer", _agentToTransferIds.size()); // for (Long hostId : _agentToTransferIds) { for (final Iterator iterator = _agentToTransferIds.iterator(); iterator.hasNext();) { @@ -984,7 +975,7 @@ public class 
ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } if (transferMap.getInitialOwner() != _nodeId || attache == null || attache.forForward()) { - logger.debug(String.format("Management server %d doesn't own host id=%d (%s) any more, skipping rebalance for the host", _nodeId, hostId, attache)); + logger.debug("Management server {} doesn't own host id={} ({}) any more, skipping rebalance for the host", _nodeId, hostId, attache); iterator.remove(); _hostTransferDao.completeAgentTransfer(hostId); continue; @@ -1004,9 +995,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust _executor.execute(new RebalanceTask(hostId, transferMap.getInitialOwner(), transferMap.getFutureOwner())); } catch (final RejectedExecutionException ex) { logger.warn("Failed to submit rebalance task for host id={} ({}); postponing the execution", hostId, attache); - continue; } - } else { logger.debug("Agent {} ({}) can't be transferred yet as its request queue size is {} and listener queue size is {}", hostId, attache, attache.getQueueSize(), attache.getNonRecurringListenersSize()); @@ -1016,7 +1005,6 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust logger.trace("Found no agents to be transferred by the management server {}", _nodeId); } } - } catch (final Throwable e) { logger.error("Problem with the clustered agent transfer scan check!", e); } @@ -1024,7 +1012,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust }; } - private boolean setToWaitForRebalance(final long hostId, final long currentOwnerId, final long futureOwnerId) { + private boolean setToWaitForRebalance(final long hostId) { logger.debug("Adding agent {} ({}) to the list of agents to transfer", hostId, findAttache(hostId)); synchronized (_agentToTransferIds) { return _agentToTransferIds.add(hostId); @@ -1065,7 +1053,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } else if 
(futureOwnerId == _nodeId) { final HostVO host = _hostDao.findById(hostId); try { - logger.debug("Disconnecting host {} as a part of rebalance process without notification", host); + logger.debug("Disconnecting {} as a part of rebalance process without notification", host); final AgentAttache attache = findAttache(hostId); if (attache != null) { @@ -1085,9 +1073,9 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } if (result) { - logger.debug("Successfully loaded directly connected host {} to the management server {} a part of rebalance process without notification", host, _nodeId); + logger.debug("Successfully loaded directly connected {} to the management server {} a part of rebalance process without notification", host, _nodeId); } else { - logger.warn("Failed to load directly connected host {} to the management server {} a part of rebalance process without notification", host, _nodeId); + logger.warn("Failed to load directly connected {} to the management server {} a part of rebalance process without notification", host, _nodeId); } } @@ -1096,12 +1084,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust protected void finishRebalance(final long hostId, final long futureOwnerId, final Event event) { - final boolean success = event == Event.RebalanceCompleted ? 
true : false; + final boolean success = event == Event.RebalanceCompleted; final AgentAttache attache = findAttache(hostId); logger.debug("Finishing rebalancing for the agent {} ({}) with event {}", hostId, attache, event); - if (attache == null || !(attache instanceof ClusteredAgentAttache)) { + if (!(attache instanceof ClusteredAgentAttache)) { logger.debug("Unable to find forward attache for the host id={} assuming that the agent disconnected already", hostId); _hostTransferDao.completeAgentTransfer(hostId); return; @@ -1197,9 +1185,9 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } protected class RebalanceTask extends ManagedContextRunnable { - Long hostId = null; - Long currentOwnerId = null; - Long futureOwnerId = null; + Long hostId; + Long currentOwnerId; + Long futureOwnerId; public RebalanceTask(final long hostId, final long currentOwnerId, final long futureOwnerId) { this.hostId = hostId; @@ -1268,7 +1256,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust final ChangeAgentCommand cmd = (ChangeAgentCommand)cmds[0]; logger.debug("Intercepting command for agent change: agent {} event: {}", cmd.getAgentId(), cmd.getEvent()); - boolean result = false; + boolean result; try { result = executeAgentUserRequest(cmd.getAgentId(), cmd.getEvent()); logger.debug("Result is {}", result); @@ -1285,7 +1273,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust final TransferAgentCommand cmd = (TransferAgentCommand)cmds[0]; logger.debug("Intercepting command for agent rebalancing: agent: {}, event: {}, connection transfer: {}", cmd.getAgentId(), cmd.getEvent(), cmd.isConnectionTransfer()); - boolean result = false; + boolean result; try { result = rebalanceAgent(cmd.getAgentId(), cmd.getEvent(), cmd.getCurrentOwner(), cmd.getFutureOwner(), cmd.isConnectionTransfer()); logger.debug("Result is {}", result); @@ -1305,7 +1293,7 @@ public class ClusteredAgentManagerImpl 
extends AgentManagerImpl implements Clust logger.debug("Intercepting command to propagate event {} for host {} ({})", () -> cmd.getEvent().name(), cmd::getHostId, () -> _hostDao.findById(cmd.getHostId())); - boolean result = false; + boolean result; try { result = _resourceMgr.executeUserRequest(cmd.getHostId(), cmd.getEvent()); logger.debug("Result is {}", result); @@ -1403,23 +1391,23 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override public boolean transferDirectAgentsFromMS(String fromMsUuid, long fromMsId, long timeoutDurationInMs) { if (timeoutDurationInMs <= 0) { - logger.debug(String.format("Not transferring direct agents from management server node %d (id: %s) to other nodes, invalid timeout duration", fromMsId, fromMsUuid)); + logger.debug("Not transferring direct agents from management server node {} (id: {}) to other nodes, invalid timeout duration", fromMsId, fromMsUuid); return false; } long transferStartTime = System.currentTimeMillis(); if (CollectionUtils.isEmpty(getDirectAgentHosts(fromMsId))) { - logger.info(String.format("No direct agent hosts available on management server node %d (id: %s), to transfer", fromMsId, fromMsUuid)); + logger.info("No direct agent hosts available on management server node {} (id: {}), to transfer", fromMsId, fromMsUuid); return true; } List msHosts = getUpMsHostsExcludingMs(fromMsId); if (msHosts.isEmpty()) { - logger.warn(String.format("No management server nodes available to transfer agents from management server node %d (id: %s)", fromMsId, fromMsUuid)); + logger.warn("No management server nodes available to transfer agents from management server node {} (id: {})", fromMsId, fromMsUuid); return false; } - logger.debug(String.format("Transferring direct agents from management server node %d (id: %s) to other nodes", fromMsId, fromMsUuid)); + logger.debug("Transferring direct agents from management server node {} (id: {}) to other nodes", fromMsId, fromMsUuid); int 
agentTransferFailedCount = 0; List dataCenterList = dcDao.listAll(); for (DataCenterVO dc : dataCenterList) { @@ -1427,11 +1415,11 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust if (CollectionUtils.isEmpty(directAgentHostsInDc)) { continue; } - logger.debug(String.format("Transferring %d direct agents from management server node %d (id: %s) of zone %s", directAgentHostsInDc.size(), fromMsId, fromMsUuid, dc.toString())); + logger.debug("Transferring {} direct agents from management server node {} (id: {}) of zone {}", directAgentHostsInDc.size(), fromMsId, fromMsUuid, dc); for (HostVO host : directAgentHostsInDc) { long transferElapsedTimeInMs = System.currentTimeMillis() - transferStartTime; if (transferElapsedTimeInMs >= timeoutDurationInMs) { - logger.debug(String.format("Stop transferring remaining direct agents from management server node %d (id: %s), timed out", fromMsId, fromMsUuid)); + logger.debug("Stop transferring remaining direct agents from management server node {} (id: {}), timed out", fromMsId, fromMsUuid); return false; } @@ -1449,7 +1437,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust updateLastManagementServer(host.getId(), fromMsId); } } catch (Exception e) { - logger.warn(String.format("Failed to transfer direct agent of the host %s from management server node %d (id: %s), due to %s", host, fromMsId, fromMsUuid, e.getMessage())); + logger.warn("Failed to transfer direct agent of the host {} from management server node {} (id: {}), due to {}", host, fromMsId, fromMsUuid, e.getMessage()); } } } @@ -1462,7 +1450,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust List hosts = _hostDao.listHostsByMs(msId); for (HostVO host : hosts) { AgentAttache agent = findAttache(host.getId()); - if (agent != null && agent instanceof DirectAgentAttache) { + if (agent instanceof DirectAgentAttache) { directAgentHosts.add(host); } } @@ -1475,7 +1463,7 @@ 
public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust List hosts = _hostDao.listHostsByMsAndDc(msId, dcId); for (HostVO host : hosts) { AgentAttache agent = findAttache(host.getId()); - if (agent != null && agent instanceof DirectAgentAttache) { + if (agent instanceof DirectAgentAttache) { directAgentHosts.add(host); } } @@ -1485,13 +1473,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust private List getUpMsHostsExcludingMs(long avoidMsId) { final List msHosts = _mshostDao.listBy(ManagementServerHost.State.Up); - Iterator iterator = msHosts.iterator(); - while (iterator.hasNext()) { - ManagementServerHostVO ms = iterator.next(); - if (ms.getMsid() == avoidMsId || _mshostPeerDao.findByPeerMsAndState(ms.getId(), ManagementServerHost.State.Up) == null) { - iterator.remove(); - } - } + msHosts.removeIf(ms -> ms.getMsid() == avoidMsId || _mshostPeerDao.findByPeerMsAndState(ms.getId(), ManagementServerHost.State.Up) == null); return msHosts; } @@ -1593,8 +1575,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust public ConfigKey[] getConfigKeys() { final ConfigKey[] keys = super.getConfigKeys(); - final List> keysLst = new ArrayList>(); - keysLst.addAll(Arrays.asList(keys)); + final List> keysLst = new ArrayList<>(Arrays.asList(keys)); keysLst.add(EnableLB); keysLst.add(ConnectedAgentThreshold); keysLst.add(LoadSize); diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index a8b0130bdbc..7b231d02cb0 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -85,6 +85,7 @@ import org.apache.cloudstack.resource.ResourceCleanupService; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import 
org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.cloudstack.utils.cache.SingleCache; import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.cloudstack.vm.UnmanagedVMsManager; @@ -406,6 +407,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private DomainDao domainDao; @Inject ResourceCleanupService resourceCleanupService; + @Inject + VmWorkJobDao vmWorkJobDao; + + private SingleCache> vmIdsInProgressCache; VmWorkJobHandlerProxy _jobHandlerProxy = new VmWorkJobHandlerProxy(this); @@ -450,6 +455,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac Long.class, "systemvm.root.disk.size", "-1", "Size of root volume (in GB) of system VMs and virtual routers", true); + private boolean syncTransitioningVmPowerState; + ScheduledExecutorService _executor = null; private long _nodeId; @@ -700,7 +707,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private void handleUnsuccessfulExpungeOperation(List finalizeExpungeCommands, List nicExpungeCommands, VMInstanceVO vm, Long hostId) throws OperationTimedoutException, AgentUnavailableException { - if (CollectionUtils.isNotEmpty(finalizeExpungeCommands) || CollectionUtils.isNotEmpty(nicExpungeCommands) && (hostId != null)) { + if ((CollectionUtils.isNotEmpty(finalizeExpungeCommands) || CollectionUtils.isNotEmpty(nicExpungeCommands)) && hostId != null) { final Commands cmds = new Commands(Command.OnError.Stop); addAllExpungeCommandsFromList(finalizeExpungeCommands, cmds, vm); addAllExpungeCommandsFromList(nicExpungeCommands, cmds, vm); @@ -816,6 +823,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac @Override public boolean start() { + vmIdsInProgressCache = new SingleCache<>(10, 
vmWorkJobDao::listVmIdsWithPendingJob); _executor.scheduleAtFixedRate(new CleanupTask(), 5, VmJobStateReportInterval.value(), TimeUnit.SECONDS); _executor.scheduleAtFixedRate(new TransitionTask(), VmOpCleanupInterval.value(), VmOpCleanupInterval.value(), TimeUnit.SECONDS); cancelWorkItems(_nodeId); @@ -843,6 +851,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac _messageBus.subscribe(VirtualMachineManager.Topics.VM_POWER_STATE, MessageDispatcher.getDispatcher(this)); + syncTransitioningVmPowerState = Boolean.TRUE.equals(VmSyncPowerStateTransitioning.value()); + return true; } @@ -3506,7 +3516,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (MIGRATE_VM_ACROSS_CLUSTERS.valueIn(host.getDataCenterId()) && (HypervisorType.VMware.equals(host.getHypervisorType()) || !checkIfVmHasClusterWideVolumes(vm.getId()))) { logger.info("Searching for hosts in the zone for vm migration"); - List clustersToExclude = _clusterDao.listAllClusters(host.getDataCenterId()); + List clustersToExclude = _clusterDao.listAllClusterIds(host.getDataCenterId()); List clusterList = _clusterDao.listByDcHyType(host.getDataCenterId(), host.getHypervisorType().toString()); for (ClusterVO cluster : clusterList) { clustersToExclude.remove(cluster.getId()); @@ -3800,7 +3810,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (ping.getHostVmStateReport() != null) { _syncMgr.processHostVmStatePingReport(agentId, ping.getHostVmStateReport(), ping.getOutOfBand()); } - scanStalledVMInTransitionStateOnUpHost(agentId); processed = true; } @@ -4757,7 +4766,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac VmOpLockStateRetry, VmOpWaitInterval, ExecuteInSequence, VmJobCheckInterval, VmJobTimeout, VmJobStateReportInterval, VmConfigDriveLabel, VmConfigDriveOnPrimaryPool, VmConfigDriveForceHostCacheUse, VmConfigDriveUseHostCacheOnUnsupportedPool, HaVmRestartHostUp, 
ResourceCountRunningVMsonly, AllowExposeHypervisorHostname, AllowExposeHypervisorHostnameAccountLevel, SystemVmRootDiskSize, - AllowExposeDomainInMetadata, MetadataCustomCloudName, VmMetadataManufacturer, VmMetadataProductName + AllowExposeDomainInMetadata, MetadataCustomCloudName, VmMetadataManufacturer, VmMetadataProductName, + VmSyncPowerStateTransitioning }; } @@ -4955,20 +4965,46 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } + /** + * Scans stalled VMs in transition states on an UP host and processes them accordingly. + * + *

This method is executed only when the {@code syncTransitioningVmPowerState} flag is enabled. It identifies + * VMs stuck in specific states (e.g., Starting, Stopping, Migrating) on a host that is UP, except for those + * in the Expunging state, which require special handling.

+ * + *

The following conditions are checked during the scan: + *

    + *
  • No pending {@code VmWork} job exists for the VM.
  • + *
  • The VM is associated with the given {@code hostId}, and the host is UP.
  • + *
+ *

+ * + *

When a host is UP, a state report for the VMs will typically be received. However, certain scenarios + * (e.g., out-of-band changes or behavior specific to hypervisors like XenServer or KVM) might result in + * missing reports, preventing the state-sync logic from running. To address this, the method scans VMs + * based on their last update timestamp. If a VM remains stalled without a status update while its host is UP, + * it is assumed to be powered off, which is generally a safe assumption.

+ * + * @param hostId the ID of the host to scan for stalled VMs in transition states. + */ private void scanStalledVMInTransitionStateOnUpHost(final long hostId) { - final long stallThresholdInMs = VmJobStateReportInterval.value() + (VmJobStateReportInterval.value() >> 1); - final Date cutTime = new Date(DateUtil.currentGMTTime().getTime() - stallThresholdInMs); - final List mostlikelyStoppedVMs = listStalledVMInTransitionStateOnUpHost(hostId, cutTime); - for (final Long vmId : mostlikelyStoppedVMs) { - final VMInstanceVO vm = _vmDao.findById(vmId); - assert vm != null; + if (!syncTransitioningVmPowerState) { + return; + } + if (!_hostDao.isHostUp(hostId)) { + return; + } + final long stallThresholdInMs = VmJobStateReportInterval.value() * 2; + final long cutTime = new Date(DateUtil.currentGMTTime().getTime() - stallThresholdInMs).getTime(); + final List hostTransitionVms = _vmDao.listByHostAndState(hostId, State.Starting, State.Stopping, State.Migrating); + + final List mostLikelyStoppedVMs = listStalledVMInTransitionStateOnUpHost(hostTransitionVms, cutTime); + for (final VMInstanceVO vm : mostLikelyStoppedVMs) { handlePowerOffReportWithNoPendingJobsOnVM(vm); } - final List vmsWithRecentReport = listVMInTransitionStateWithRecentReportOnUpHost(hostId, cutTime); - for (final Long vmId : vmsWithRecentReport) { - final VMInstanceVO vm = _vmDao.findById(vmId); - assert vm != null; + final List vmsWithRecentReport = listVMInTransitionStateWithRecentReportOnUpHost(hostTransitionVms, cutTime); + for (final VMInstanceVO vm : vmsWithRecentReport) { if (vm.getPowerState() == PowerState.PowerOn) { handlePowerOnReportWithNoPendingJobsOnVM(vm); } else { @@ -4977,6 +5013,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } + private void scanStalledVMInTransitionStateOnDisconnectedHosts() { final Date cutTime = new Date(DateUtil.currentGMTTime().getTime() - VmOpWaitInterval.value() * 1000); final List stuckAndUncontrollableVMs = 
listStalledVMInTransitionStateOnDisconnectedHosts(cutTime); @@ -4989,89 +5026,58 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } - private List listStalledVMInTransitionStateOnUpHost(final long hostId, final Date cutTime) { - final String sql = "SELECT i.* FROM vm_instance as i, host as h WHERE h.status = 'UP' " + - "AND h.id = ? AND i.power_state_update_time < ? AND i.host_id = h.id " + - "AND (i.state ='Starting' OR i.state='Stopping' OR i.state='Migrating') " + - "AND i.id NOT IN (SELECT w.vm_instance_id FROM vm_work_job AS w JOIN async_job AS j ON w.id = j.id WHERE j.job_status = ?)" + - "AND i.removed IS NULL"; - - final List l = new ArrayList<>(); - try (TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB)) { - String cutTimeStr = DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime); - - try { - PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql); - - pstmt.setLong(1, hostId); - pstmt.setString(2, cutTimeStr); - pstmt.setInt(3, JobInfo.Status.IN_PROGRESS.ordinal()); - final ResultSet rs = pstmt.executeQuery(); - while (rs.next()) { - l.add(rs.getLong(1)); - } - } catch (SQLException e) { - logger.error("Unable to execute SQL [{}] with params {\"h.id\": {}, \"i.power_state_update_time\": \"{}\"} due to [{}].", sql, hostId, cutTimeStr, e.getMessage(), e); - } + private List listStalledVMInTransitionStateOnUpHost( + final List transitioningVms, final long cutTime) { + if (CollectionUtils.isEmpty(transitioningVms)) { + return transitioningVms; } - return l; + List vmIdsInProgress = vmIdsInProgressCache.get(); + return transitioningVms.stream() + .filter(v -> v.getPowerStateUpdateTime().getTime() < cutTime && !vmIdsInProgress.contains(v.getId())) + .collect(Collectors.toList()); } - private List listVMInTransitionStateWithRecentReportOnUpHost(final long hostId, final Date cutTime) { - final String sql = "SELECT i.* FROM vm_instance as i, host as h WHERE h.status = 'UP' " + - "AND 
h.id = ? AND i.power_state_update_time > ? AND i.host_id = h.id " + - "AND (i.state ='Starting' OR i.state='Stopping' OR i.state='Migrating') " + - "AND i.id NOT IN (SELECT w.vm_instance_id FROM vm_work_job AS w JOIN async_job AS j ON w.id = j.id WHERE j.job_status = ?)" + - "AND i.removed IS NULL"; - - final List l = new ArrayList<>(); - try (TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB)) { - String cutTimeStr = DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime); - int jobStatusInProgress = JobInfo.Status.IN_PROGRESS.ordinal(); - - try { - PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql); - - pstmt.setLong(1, hostId); - pstmt.setString(2, cutTimeStr); - pstmt.setInt(3, jobStatusInProgress); - final ResultSet rs = pstmt.executeQuery(); - while (rs.next()) { - l.add(rs.getLong(1)); - } - } catch (final SQLException e) { - logger.error("Unable to execute SQL [{}] with params {\"h.id\": {}, \"i.power_state_update_time\": \"{}\", \"j.job_status\": {}} due to [{}].", sql, hostId, cutTimeStr, jobStatusInProgress, e.getMessage(), e); - } - return l; + private List listVMInTransitionStateWithRecentReportOnUpHost( + final List transitioningVms, final long cutTime) { + if (CollectionUtils.isEmpty(transitioningVms)) { + return transitioningVms; } + List vmIdsInProgress = vmIdsInProgressCache.get(); + return transitioningVms.stream() + .filter(v -> v.getPowerStateUpdateTime().getTime() > cutTime && !vmIdsInProgress.contains(v.getId())) + .collect(Collectors.toList()); } private List listStalledVMInTransitionStateOnDisconnectedHosts(final Date cutTime) { - final String sql = "SELECT i.* FROM vm_instance as i, host as h WHERE h.status != 'UP' " + - "AND i.power_state_update_time < ? 
AND i.host_id = h.id " + - "AND (i.state ='Starting' OR i.state='Stopping' OR i.state='Migrating') " + - "AND i.id NOT IN (SELECT w.vm_instance_id FROM vm_work_job AS w JOIN async_job AS j ON w.id = j.id WHERE j.job_status = ?)" + - "AND i.removed IS NULL"; + final String sql = "SELECT i.* " + + "FROM vm_instance AS i " + + "INNER JOIN host AS h ON i.host_id = h.id " + + "WHERE h.status != 'UP' " + + " AND i.power_state_update_time < ? " + + " AND i.state IN ('Starting', 'Stopping', 'Migrating') " + + " AND i.id NOT IN (SELECT vm_instance_id FROM vm_work_job AS w " + + " INNER JOIN async_job AS j ON w.id = j.id " + + " WHERE j.job_status = ?) " + + " AND i.removed IS NULL"; final List l = new ArrayList<>(); - try (TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB)) { - String cutTimeStr = DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime); - int jobStatusInProgress = JobInfo.Status.IN_PROGRESS.ordinal(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); + String cutTimeStr = DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime); + int jobStatusInProgress = JobInfo.Status.IN_PROGRESS.ordinal(); - try { - PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql); + try { + PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql); - pstmt.setString(1, cutTimeStr); - pstmt.setInt(2, jobStatusInProgress); - final ResultSet rs = pstmt.executeQuery(); - while (rs.next()) { - l.add(rs.getLong(1)); - } - } catch (final SQLException e) { - logger.error("Unable to execute SQL [{}] with params {\"i.power_state_update_time\": \"{}\", \"j.job_status\": {}} due to [{}].", sql, cutTimeStr, jobStatusInProgress, e.getMessage(), e); + pstmt.setString(1, cutTimeStr); + pstmt.setInt(2, jobStatusInProgress); + final ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + l.add(rs.getLong(1)); } - return l; + } catch (final SQLException e) { + logger.error("Unable to execute SQL [{}] with params 
{\"i.power_state_update_time\": \"{}\", \"j.job_status\": {}} due to [{}].", sql, cutTimeStr, jobStatusInProgress, e.getMessage(), e); } + return l; } public class VmStateSyncOutcome extends OutcomeImpl { @@ -5953,29 +5959,23 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } @Override - public HashMap getVirtualMachineStatistics(long hostId, String hostName, List vmIds) { + public HashMap getVirtualMachineStatistics(Host host, List vmIds) { HashMap vmStatsById = new HashMap<>(); if (CollectionUtils.isEmpty(vmIds)) { return vmStatsById; } - Map vmMap = new HashMap<>(); - for (Long vmId : vmIds) { - vmMap.put(vmId, _vmDao.findById(vmId)); - } - return getVirtualMachineStatistics(hostId, hostName, vmMap); + Map vmMap = _vmDao.getNameIdMapForVmIds(vmIds); + return getVirtualMachineStatistics(host, vmMap); } @Override - public HashMap getVirtualMachineStatistics(long hostId, String hostName, Map vmMap) { + public HashMap getVirtualMachineStatistics(Host host, Map vmInstanceNameIdMap) { HashMap vmStatsById = new HashMap<>(); - if (MapUtils.isEmpty(vmMap)) { + if (MapUtils.isEmpty(vmInstanceNameIdMap)) { return vmStatsById; } - Map vmNames = new HashMap<>(); - for (Map.Entry vmEntry : vmMap.entrySet()) { - vmNames.put(vmEntry.getValue().getInstanceName(), vmEntry.getKey()); - } - Answer answer = _agentMgr.easySend(hostId, new GetVmStatsCommand(new ArrayList<>(vmNames.keySet()), _hostDao.findById(hostId).getGuid(), hostName)); + Answer answer = _agentMgr.easySend(host.getId(), new GetVmStatsCommand( + new ArrayList<>(vmInstanceNameIdMap.keySet()), host.getGuid(), host.getName())); if (answer == null || !answer.getResult()) { logger.warn("Unable to obtain VM statistics."); return vmStatsById; @@ -5986,23 +5986,20 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac return vmStatsById; } for (Map.Entry entry : vmStatsByName.entrySet()) { - vmStatsById.put(vmNames.get(entry.getKey()), entry.getValue()); + 
vmStatsById.put(vmInstanceNameIdMap.get(entry.getKey()), entry.getValue()); } } return vmStatsById; } @Override - public HashMap> getVmDiskStatistics(long hostId, String hostName, Map vmMap) { + public HashMap> getVmDiskStatistics(Host host, Map vmInstanceNameIdMap) { HashMap> vmDiskStatsById = new HashMap<>(); - if (MapUtils.isEmpty(vmMap)) { + if (MapUtils.isEmpty(vmInstanceNameIdMap)) { return vmDiskStatsById; } - Map vmNames = new HashMap<>(); - for (Map.Entry vmEntry : vmMap.entrySet()) { - vmNames.put(vmEntry.getValue().getInstanceName(), vmEntry.getKey()); - } - Answer answer = _agentMgr.easySend(hostId, new GetVmDiskStatsCommand(new ArrayList<>(vmNames.keySet()), _hostDao.findById(hostId).getGuid(), hostName)); + Answer answer = _agentMgr.easySend(host.getId(), new GetVmDiskStatsCommand( + new ArrayList<>(vmInstanceNameIdMap.keySet()), host.getGuid(), host.getName())); if (answer == null || !answer.getResult()) { logger.warn("Unable to obtain VM disk statistics."); return vmDiskStatsById; @@ -6013,23 +6010,20 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac return vmDiskStatsById; } for (Map.Entry> entry: vmDiskStatsByName.entrySet()) { - vmDiskStatsById.put(vmNames.get(entry.getKey()), entry.getValue()); + vmDiskStatsById.put(vmInstanceNameIdMap.get(entry.getKey()), entry.getValue()); } } return vmDiskStatsById; } @Override - public HashMap> getVmNetworkStatistics(long hostId, String hostName, Map vmMap) { + public HashMap> getVmNetworkStatistics(Host host, Map vmInstanceNameIdMap) { HashMap> vmNetworkStatsById = new HashMap<>(); - if (MapUtils.isEmpty(vmMap)) { + if (MapUtils.isEmpty(vmInstanceNameIdMap)) { return vmNetworkStatsById; } - Map vmNames = new HashMap<>(); - for (Map.Entry vmEntry : vmMap.entrySet()) { - vmNames.put(vmEntry.getValue().getInstanceName(), vmEntry.getKey()); - } - Answer answer = _agentMgr.easySend(hostId, new GetVmNetworkStatsCommand(new ArrayList<>(vmNames.keySet()), 
_hostDao.findById(hostId).getGuid(), hostName)); + Answer answer = _agentMgr.easySend(host.getId(), new GetVmNetworkStatsCommand( + new ArrayList<>(vmInstanceNameIdMap.keySet()), host.getGuid(), host.getName())); if (answer == null || !answer.getResult()) { logger.warn("Unable to obtain VM network statistics."); return vmNetworkStatsById; @@ -6040,7 +6034,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac return vmNetworkStatsById; } for (Map.Entry> entry: vmNetworkStatsByName.entrySet()) { - vmNetworkStatsById.put(vmNames.get(entry.getKey()), entry.getValue()); + vmNetworkStatsById.put(vmInstanceNameIdMap.get(entry.getKey()), entry.getValue()); } } return vmNetworkStatsById; diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java index 94dddfdf18a..4b344ac4299 100644 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java @@ -16,27 +16,29 @@ // under the License. 
package com.cloud.vm; -import java.text.SimpleDateFormat; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; import javax.inject.Inject; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.host.dao.HostDao; -import com.cloud.utils.Pair; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.PublishScope; -import org.apache.logging.log4j.Logger; +import org.apache.cloudstack.utils.cache.LazyCache; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.collections.MapUtils; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import com.cloud.agent.api.HostVmStateReportEntry; import com.cloud.configuration.ManagementServiceConfiguration; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; import com.cloud.utils.DateUtil; -import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.dao.VMInstanceDao; public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStateSync { @@ -47,7 +49,12 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat @Inject HostDao hostDao; @Inject ManagementServiceConfiguration mgmtServiceConf; + private LazyCache vmCache; + private LazyCache hostCache; + public VirtualMachinePowerStateSyncImpl() { + vmCache = new LazyCache<>(16, 10, this::getVmFromId); + hostCache = new LazyCache<>(16, 10, this::getHostFromId); } @Override @@ -58,130 +65,141 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat @Override public void processHostVmStateReport(long hostId, Map report) { - HostVO host = hostDao.findById(hostId); - logger.debug("Process host VM state report. 
host: {}", host); - - Map> translatedInfo = convertVmStateReport(report); - processReport(host, translatedInfo, false); + logger.debug("Process host VM state report. host: {}", hostCache.get(hostId)); + Map translatedInfo = convertVmStateReport(report); + processReport(hostId, translatedInfo, false); } @Override public void processHostVmStatePingReport(long hostId, Map report, boolean force) { - HostVO host = hostDao.findById(hostId); - logger.debug("Process host VM state report from ping process. host: {}", host); - - Map> translatedInfo = convertVmStateReport(report); - processReport(host, translatedInfo, force); + logger.debug("Process host VM state report from ping process. host: {}", hostCache.get(hostId)); + Map translatedInfo = convertVmStateReport(report); + processReport(hostId, translatedInfo, force); } - private void processReport(HostVO host, Map> translatedInfo, boolean force) { - - logger.debug("Process VM state report. host: {}, number of records in report: {}.", host, translatedInfo.size()); - - for (Map.Entry> entry : translatedInfo.entrySet()) { - - logger.debug("VM state report. host: {}, vm: {}, power state: {}", host, entry.getValue().second(), entry.getValue().first()); - - if (_instanceDao.updatePowerState(entry.getKey(), host.getId(), entry.getValue().first(), DateUtil.currentGMTTime())) { - logger.debug("VM state report is updated. host: {}, vm: {}, power state: {}", host, entry.getValue().second(), entry.getValue().first()); - - _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, entry.getKey()); - } else { - logger.trace("VM power state does not change, skip DB writing. 
vm: {}", entry.getValue().second()); - } + private void updateAndPublishVmPowerStates(long hostId, Map instancePowerStates, + Date updateTime) { + if (instancePowerStates.isEmpty()) { + return; } + Set vmIds = instancePowerStates.keySet(); + Map notUpdated = _instanceDao.updatePowerState(instancePowerStates, hostId, + updateTime); + if (notUpdated.size() > vmIds.size()) { + return; + } + for (Long vmId : vmIds) { + if (!notUpdated.isEmpty() && !notUpdated.containsKey(vmId)) { + logger.debug("VM state report is updated. {}, {}, power state: {}", + () -> hostCache.get(hostId), () -> vmCache.get(vmId), () -> instancePowerStates.get(vmId)); + _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, + PublishScope.GLOBAL, vmId); + continue; + } + logger.trace("VM power state does not change, skip DB writing. {}", () -> vmCache.get(vmId)); + } + } + private List filterOutdatedFromMissingVmReport(List vmsThatAreMissingReport) { + List outdatedVms = vmsThatAreMissingReport.stream() + .filter(v -> !_instanceDao.isPowerStateUpToDate(v)) + .map(VMInstanceVO::getId) + .collect(Collectors.toList()); + if (CollectionUtils.isEmpty(outdatedVms)) { + return vmsThatAreMissingReport; + } + _instanceDao.resetVmPowerStateTracking(outdatedVms); + return vmsThatAreMissingReport.stream() + .filter(v -> !outdatedVms.contains(v.getId())) + .collect(Collectors.toList()); + } + + private void processMissingVmReport(long hostId, Set vmIds, boolean force) { // any state outdates should be checked against the time before this list was retrieved Date startTime = DateUtil.currentGMTTime(); // for all running/stopping VMs, we provide monitoring of missing report - List vmsThatAreMissingReport = _instanceDao.findByHostInStates(host.getId(), VirtualMachine.State.Running, - VirtualMachine.State.Stopping, VirtualMachine.State.Starting); - java.util.Iterator it = vmsThatAreMissingReport.iterator(); - while (it.hasNext()) { - VMInstanceVO instance = it.next(); - if 
(translatedInfo.get(instance.getId()) != null) - it.remove(); + List vmsThatAreMissingReport = _instanceDao.findByHostInStatesExcluding(hostId, vmIds, + VirtualMachine.State.Running, VirtualMachine.State.Stopping, VirtualMachine.State.Starting); + // here we need to be wary of out of band migration as opposed to other, more unexpected state changes + if (vmsThatAreMissingReport.isEmpty()) { + return; + } + Date currentTime = DateUtil.currentGMTTime(); + logger.debug("Run missing VM report. current time: {}", currentTime.getTime()); + if (!force) { + vmsThatAreMissingReport = filterOutdatedFromMissingVmReport(vmsThatAreMissingReport); } - // here we need to be wary of out of band migration as opposed to other, more unexpected state changes - if (vmsThatAreMissingReport.size() > 0) { - Date currentTime = DateUtil.currentGMTTime(); - logger.debug("Run missing VM report for host {}. current time: {}", host, currentTime.getTime()); - - // 2 times of sync-update interval for graceful period - long milliSecondsGracefullPeriod = mgmtServiceConf.getPingInterval() * 2000L; - - for (VMInstanceVO instance : vmsThatAreMissingReport) { - - // Make sure powerState is up to date for missing VMs - try { - if (!force && !_instanceDao.isPowerStateUpToDate(instance.getId())) { - logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM: {}", instance); - _instanceDao.resetVmPowerStateTracking(instance.getId()); - continue; - } - } catch (CloudRuntimeException e) { - logger.warn("Checked for missing powerstate of a none existing vm {}", instance, e); - continue; - } - - Date vmStateUpdateTime = instance.getPowerStateUpdateTime(); + // 2 times of sync-update interval for graceful period + long milliSecondsGracefulPeriod = mgmtServiceConf.getPingInterval() * 2000L; + Map instancePowerStates = new HashMap<>(); + for (VMInstanceVO instance : vmsThatAreMissingReport) { + Date vmStateUpdateTime = instance.getPowerStateUpdateTime(); + if 
(vmStateUpdateTime == null) { + logger.warn("VM power state update time is null, falling back to update time for {}", instance); + vmStateUpdateTime = instance.getUpdateTime(); if (vmStateUpdateTime == null) { - logger.warn("VM power state update time is null, falling back to update time for vm: {}", instance); - vmStateUpdateTime = instance.getUpdateTime(); - if (vmStateUpdateTime == null) { - logger.warn("VM update time is null, falling back to creation time for vm: {}", instance); - vmStateUpdateTime = instance.getCreated(); - } - } - - String lastTime = new SimpleDateFormat("yyyy/MM/dd'T'HH:mm:ss.SSS'Z'").format(vmStateUpdateTime); - logger.debug("Detected missing VM. host: {}, vm: {}, power state: {}, last state update: {}", - host, instance, VirtualMachine.PowerState.PowerReportMissing, lastTime); - - long milliSecondsSinceLastStateUpdate = currentTime.getTime() - vmStateUpdateTime.getTime(); - - if (force || milliSecondsSinceLastStateUpdate > milliSecondsGracefullPeriod) { - logger.debug("vm: {} - time since last state update({}ms) has passed graceful period", instance, milliSecondsSinceLastStateUpdate); - - // this is were a race condition might have happened if we don't re-fetch the instance; - // between the startime of this job and the currentTime of this missing-branch - // an update might have occurred that we should not override in case of out of band migration - if (_instanceDao.updatePowerState(instance.getId(), host.getId(), VirtualMachine.PowerState.PowerReportMissing, startTime)) { - logger.debug("VM state report is updated. host: {}, vm: {}, power state: PowerReportMissing ", host, instance); - - _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, instance.getId()); - } else { - logger.debug("VM power state does not change, skip DB writing. 
vm: {}", instance); - } - } else { - logger.debug("vm: {} - time since last state update({} ms) has not passed graceful period yet", instance, milliSecondsSinceLastStateUpdate); + logger.warn("VM update time is null, falling back to creation time for {}", instance); + vmStateUpdateTime = instance.getCreated(); } } + logger.debug("Detected missing VM. host: {}, vm id: {}({}), power state: {}, last state update: {}", + hostId, + instance.getId(), + instance.getUuid(), + VirtualMachine.PowerState.PowerReportMissing, + DateUtil.getOutputString(vmStateUpdateTime)); + long milliSecondsSinceLastStateUpdate = currentTime.getTime() - vmStateUpdateTime.getTime(); + if (force || (milliSecondsSinceLastStateUpdate > milliSecondsGracefulPeriod)) { + logger.debug("vm id: {} - time since last state update({} ms) has passed graceful period", + instance.getId(), milliSecondsSinceLastStateUpdate); + // this is where a race condition might have happened if we don't re-fetch the instance; + // between the startime of this job and the currentTime of this missing-branch + // an update might have occurred that we should not override in case of out of band migration + instancePowerStates.put(instance.getId(), VirtualMachine.PowerState.PowerReportMissing); + } else { + logger.debug("vm id: {} - time since last state update({} ms) has not passed graceful period yet", + instance.getId(), milliSecondsSinceLastStateUpdate); + } } - - logger.debug("Done with process of VM state report. host: {}", host); + updateAndPublishVmPowerStates(hostId, instancePowerStates, startTime); } - public Map> convertVmStateReport(Map states) { - final HashMap> map = new HashMap<>(); - if (states == null) { + private void processReport(long hostId, Map translatedInfo, boolean force) { + logger.debug("Process VM state report. {}, number of records in report: {}. 
VMs: [{}]", + () -> hostCache.get(hostId), + translatedInfo::size, + () -> translatedInfo.entrySet().stream().map(entry -> entry.getKey() + ":" + entry.getValue()) + .collect(Collectors.joining(", ")) + "]"); + updateAndPublishVmPowerStates(hostId, translatedInfo, DateUtil.currentGMTTime()); + + processMissingVmReport(hostId, translatedInfo.keySet(), force); + + logger.debug("Done with process of VM state report. host: {}", () -> hostCache.get(hostId)); + } + + public Map convertVmStateReport(Map states) { + final HashMap map = new HashMap<>(); + if (MapUtils.isEmpty(states)) { return map; } - + Map nameIdMap = _instanceDao.getNameIdMapForVmInstanceNames(states.keySet()); for (Map.Entry entry : states.entrySet()) { - VMInstanceVO vm = findVM(entry.getKey()); - if (vm != null) { - map.put(vm.getId(), new Pair<>(entry.getValue().getState(), vm)); + Long id = nameIdMap.get(entry.getKey()); + if (id != null) { + map.put(id, entry.getValue().getState()); } else { logger.debug("Unable to find matched VM in CloudStack DB. 
name: {} powerstate: {}", entry.getKey(), entry.getValue()); } } - return map; } - private VMInstanceVO findVM(String vmName) { - return _instanceDao.findVMByInstanceName(vmName); + protected VMInstanceVO getVmFromId(long vmId) { + return _instanceDao.findById(vmId); + } + + protected HostVO getHostFromId(long hostId) { + return hostDao.findById(hostId); } } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java index 64eb2ac024b..b0081c6e685 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java @@ -4872,7 +4872,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[]{NetworkGcWait, NetworkGcInterval, NetworkLockTimeout, + return new ConfigKey[]{NetworkGcWait, NetworkGcInterval, NetworkLockTimeout, DeniedRoutes, GuestDomainSuffix, NetworkThrottlingRate, MinVRVersion, PromiscuousMode, MacAddressChanges, ForgedTransmits, MacLearning, RollingRestartEnabled, TUNGSTEN_ENABLED, NSX_ENABLED }; diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index 06061908888..db0119febde 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -1807,7 +1807,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati try { volService.grantAccess(volFactory.getVolume(newVol.getId()), host, destPool); } 
catch (Exception e) { - throw new StorageAccessException(String.format("Unable to grant access to the volume [%s] on host [%s].", newVolToString, host)); + throw new StorageAccessException(String.format("Unable to grant access to the volume [%s] on host [%s].", newVolToString, host), e); } } @@ -1847,7 +1847,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati try { volService.grantAccess(volFactory.getVolume(volumeId), host, volumeStore); } catch (Exception e) { - throw new StorageAccessException(String.format("Unable to grant access to volume [%s] on host [%s].", volToString, host)); + throw new StorageAccessException(String.format("Unable to grant access to volume [%s] on host [%s].", volToString, host), e); } } @@ -1928,7 +1928,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati try { volService.grantAccess(volFactory.getVolume(vol.getId()), host, store); } catch (Exception e) { - throw new StorageAccessException(String.format("Unable to grant access to volume [%s] on host [%s].", volToString, host)); + throw new StorageAccessException(String.format("Unable to grant access to volume [%s] on host [%s].", volToString, host), e); } } else { grantVolumeAccessToHostIfNeeded(store, vol.getId(), host, volToString); diff --git a/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDao.java b/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDao.java index 9616f31d0c5..1bb79ce417a 100644 --- a/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDao.java +++ b/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDao.java @@ -28,6 +28,8 @@ import com.cloud.utils.db.GenericDao; public interface CapacityDao extends GenericDao { CapacityVO findByHostIdType(Long hostId, short capacityType); + List listByHostIdTypes(Long hostId, List capacityTypes); + List listClustersInZoneOrPodByHostCapacities(long id, long vmId, int requiredCpu, long requiredRam, short capacityTypeForOrdering, 
boolean isZone); List listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType); diff --git a/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDaoImpl.java b/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDaoImpl.java index 3acae985af4..5e7eee4566c 100644 --- a/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDaoImpl.java @@ -671,6 +671,18 @@ public class CapacityDaoImpl extends GenericDaoBase implements return findOneBy(sc); } + @Override + public List listByHostIdTypes(Long hostId, List capacityTypes) { + SearchBuilder sb = createSearchBuilder(); + sb.and("hostId", sb.entity().getHostOrPoolId(), SearchCriteria.Op.EQ); + sb.and("type", sb.entity().getCapacityType(), SearchCriteria.Op.IN); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("hostId", hostId); + sc.setParameters("type", capacityTypes.toArray()); + return listBy(sc); + } + @Override public List listClustersInZoneOrPodByHostCapacities(long id, long vmId, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone) { TransactionLegacy txn = TransactionLegacy.currentTxn(); diff --git a/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDao.java b/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDao.java index 06c9c525504..27cea8d5c2d 100644 --- a/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDao.java +++ b/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDao.java @@ -16,11 +16,13 @@ // under the License. 
package com.cloud.dc; +import java.util.Collection; import java.util.Map; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDao; -public interface ClusterDetailsDao extends GenericDao { +public interface ClusterDetailsDao extends GenericDao, ResourceDetailsDao { Map findDetails(long clusterId); void persist(long clusterId, Map details); @@ -29,6 +31,8 @@ public interface ClusterDetailsDao extends GenericDao { ClusterDetailsVO findDetail(long clusterId, String name); + Map findDetails(long clusterId, Collection names); + void deleteDetails(long clusterId); String getVmwareDcName(Long clusterId); diff --git a/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDaoImpl.java index 0e40f8475c1..4c752ff9b4f 100644 --- a/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDaoImpl.java @@ -16,21 +16,33 @@ // under the License. 
package com.cloud.dc; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; + +import javax.inject.Inject; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.ConfigKey.Scope; import org.apache.cloudstack.framework.config.ScopedConfigStorage; +import org.apache.commons.collections.CollectionUtils; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.org.Cluster; +import com.cloud.utils.Pair; import com.cloud.utils.crypt.DBEncryptionUtil; -import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.TransactionLegacy; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; + +public class ClusterDetailsDaoImpl extends ResourceDetailsDaoBase implements ClusterDetailsDao, ScopedConfigStorage { + + @Inject + ClusterDao clusterDao; -public class ClusterDetailsDaoImpl extends GenericDaoBase implements ClusterDetailsDao, ScopedConfigStorage { protected final SearchBuilder ClusterSearch; protected final SearchBuilder DetailSearch; @@ -41,11 +53,11 @@ public class ClusterDetailsDaoImpl extends GenericDaoBase findDetails(long clusterId) { SearchCriteria sc = ClusterSearch.create(); @@ -82,6 +99,23 @@ public class ClusterDetailsDaoImpl extends GenericDaoBase findDetails(long clusterId, Collection names) { + if (CollectionUtils.isEmpty(names)) { + return new HashMap<>(); + } + SearchBuilder sb = createSearchBuilder(); + sb.and("clusterId", sb.entity().getResourceId(), SearchCriteria.Op.EQ); + sb.and("name", sb.entity().getName(), SearchCriteria.Op.IN); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("clusterId", clusterId); + sc.setParameters("name", names.toArray()); + List results = search(sc, null); + return results.stream() + .collect(Collectors.toMap(ClusterDetailsVO::getName, ClusterDetailsVO::getValue)); + } + 
@Override public void deleteDetails(long clusterId) { SearchCriteria sc = ClusterSearch.create(); @@ -160,4 +194,13 @@ public class ClusterDetailsDaoImpl extends GenericDaoBase getParentScope(long id) { + Cluster cluster = clusterDao.findById(id); + if (cluster == null) { + return null; + } + return new Pair<>(getScope().getParent(), cluster.getDataCenterId()); + } } diff --git a/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsVO.java b/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsVO.java index 6eb9e7466a7..b213f8f2594 100644 --- a/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsVO.java +++ b/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsVO.java @@ -23,11 +23,11 @@ import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.Table; -import org.apache.cloudstack.api.InternalIdentity; +import org.apache.cloudstack.api.ResourceDetail; @Entity @Table(name = "cluster_details") -public class ClusterDetailsVO implements InternalIdentity { +public class ClusterDetailsVO implements ResourceDetail { @Id @GeneratedValue(strategy = GenerationType.IDENTITY) @@ -35,7 +35,7 @@ public class ClusterDetailsVO implements InternalIdentity { private long id; @Column(name = "cluster_id") - private long clusterId; + private long resourceId; @Column(name = "name") private String name; @@ -47,13 +47,14 @@ public class ClusterDetailsVO implements InternalIdentity { } public ClusterDetailsVO(long clusterId, String name, String value) { - this.clusterId = clusterId; + this.resourceId = clusterId; this.name = name; this.value = value; } - public long getClusterId() { - return clusterId; + @Override + public long getResourceId() { + return resourceId; } public String getName() { @@ -64,6 +65,11 @@ public class ClusterDetailsVO implements InternalIdentity { return value; } + @Override + public boolean isDisplay() { + return true; + } + public void setValue(String value) { this.value = value; } diff --git 
a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java index 6ecfdaeb058..bf12abd5114 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java @@ -16,15 +16,15 @@ // under the License. package com.cloud.dc.dao; +import java.util.List; +import java.util.Map; +import java.util.Set; + import com.cloud.cpu.CPU; import com.cloud.dc.ClusterVO; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.utils.db.GenericDao; -import java.util.List; -import java.util.Map; -import java.util.Set; - public interface ClusterDao extends GenericDao { List listByPodId(long podId); @@ -36,7 +36,7 @@ public interface ClusterDao extends GenericDao { List getAvailableHypervisorInZone(Long zoneId); - Set getDistictAvailableHypervisorsAcrossClusters(); + Set getDistinctAvailableHypervisorsAcrossClusters(); List listByDcHyType(long dcId, String hyType); @@ -46,9 +46,13 @@ public interface ClusterDao extends GenericDao { List listClustersWithDisabledPods(long zoneId); + Integer countAllByDcId(long zoneId); + + Integer countAllManagedAndEnabledByDcId(long zoneId); + List listClustersByDcId(long zoneId); - List listAllClusters(Long zoneId); + List listAllClusterIds(Long zoneId); boolean getSupportsResigning(long clusterId); diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java index 9a56f0f2d94..af6b8397643 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java @@ -16,25 +16,6 @@ // under the License. 
package com.cloud.dc.dao; -import com.cloud.cpu.CPU; -import com.cloud.dc.ClusterDetailsDao; -import com.cloud.dc.ClusterDetailsVO; -import com.cloud.dc.ClusterVO; -import com.cloud.dc.HostPodVO; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.org.Grouping; -import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.GenericSearchBuilder; -import com.cloud.utils.db.JoinBuilder; -import com.cloud.utils.db.SearchBuilder; -import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.SearchCriteria.Func; -import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.TransactionLegacy; -import com.cloud.utils.exception.CloudRuntimeException; -import org.springframework.stereotype.Component; - -import javax.inject.Inject; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; @@ -46,6 +27,28 @@ import java.util.Map; import java.util.Set; import java.util.stream.Collectors; +import javax.inject.Inject; + +import org.springframework.stereotype.Component; + +import com.cloud.cpu.CPU; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.ClusterDetailsVO; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.HostPodVO; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.org.Grouping; +import com.cloud.org.Managed; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.JoinBuilder; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.SearchCriteria.Func; +import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.TransactionLegacy; +import com.cloud.utils.exception.CloudRuntimeException; + @Component public class ClusterDaoImpl extends GenericDaoBase implements ClusterDao { @@ -58,7 +61,6 @@ public class ClusterDaoImpl extends GenericDaoBase implements C protected final SearchBuilder ClusterSearch; protected final 
SearchBuilder ClusterDistinctArchSearch; protected final SearchBuilder ClusterArchSearch; - protected GenericSearchBuilder ClusterIdSearch; private static final String GET_POD_CLUSTER_MAP_PREFIX = "SELECT pod_id, id FROM cloud.cluster WHERE cluster.id IN( "; @@ -98,6 +100,8 @@ public class ClusterDaoImpl extends GenericDaoBase implements C ZoneClusterSearch = createSearchBuilder(); ZoneClusterSearch.and("dataCenterId", ZoneClusterSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + ZoneClusterSearch.and("allocationState", ZoneClusterSearch.entity().getAllocationState(), Op.EQ); + ZoneClusterSearch.and("managedState", ZoneClusterSearch.entity().getManagedState(), Op.EQ); ZoneClusterSearch.done(); ClusterIdSearch = createSearchBuilder(Long.class); @@ -167,23 +171,15 @@ public class ClusterDaoImpl extends GenericDaoBase implements C sc.setParameters("zoneId", zoneId); } List clusters = listBy(sc); - List hypers = new ArrayList(4); - for (ClusterVO cluster : clusters) { - hypers.add(cluster.getHypervisorType()); - } - - return hypers; + return clusters.stream() + .map(ClusterVO::getHypervisorType) + .distinct() + .collect(Collectors.toList()); } @Override - public Set getDistictAvailableHypervisorsAcrossClusters() { - SearchCriteria sc = ClusterSearch.create(); - List clusters = listBy(sc); - Set hypers = new HashSet<>(); - for (ClusterVO cluster : clusters) { - hypers.add(cluster.getHypervisorType()); - } - return hypers; + public Set getDistinctAvailableHypervisorsAcrossClusters() { + return new HashSet<>(getAvailableHypervisorInZone(null)); } @Override @@ -266,6 +262,23 @@ public class ClusterDaoImpl extends GenericDaoBase implements C return customSearch(sc, null); } + @Override + public Integer countAllByDcId(long zoneId) { + SearchCriteria sc = ZoneClusterSearch.create(); + sc.setParameters("dataCenterId", zoneId); + return getCount(sc); + } + + @Override + public Integer countAllManagedAndEnabledByDcId(long zoneId) { + SearchCriteria sc = 
ZoneClusterSearch.create(); + sc.setParameters("dataCenterId", zoneId); + sc.setParameters("allocationState", Grouping.AllocationState.Enabled); + sc.setParameters("managedState", Managed.ManagedState.Managed); + + return getCount(sc); + } + @Override public List listClustersByDcId(long zoneId) { SearchCriteria sc = ZoneClusterSearch.create(); @@ -289,7 +302,7 @@ public class ClusterDaoImpl extends GenericDaoBase implements C } @Override - public List listAllClusters(Long zoneId) { + public List listAllClusterIds(Long zoneId) { SearchCriteria sc = ClusterIdSearch.create(); if (zoneId != null) { sc.setParameters("dataCenterId", zoneId); diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java index 48b9c83c64c..ba01e31f80a 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java @@ -294,8 +294,7 @@ public class DataCenterIpAddressDaoImpl extends GenericDaoBase result = listBy(sc); - return result.size(); + return getCount(sc); } public DataCenterIpAddressDaoImpl() { diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterVnetDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterVnetDaoImpl.java index 1c29e6a944c..ff668249779 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterVnetDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterVnetDaoImpl.java @@ -81,7 +81,7 @@ public class DataCenterVnetDaoImpl extends GenericDaoBase sc = DcSearchAllocated.create(); sc.setParameters("physicalNetworkId", physicalNetworkId); - return listBy(sc).size(); + return getCount(sc); } @Override diff --git a/engine/schema/src/main/java/com/cloud/domain/DomainDetailVO.java b/engine/schema/src/main/java/com/cloud/domain/DomainDetailVO.java index df5a2283baa..6f803cc9f2f 100644 --- 
a/engine/schema/src/main/java/com/cloud/domain/DomainDetailVO.java +++ b/engine/schema/src/main/java/com/cloud/domain/DomainDetailVO.java @@ -23,18 +23,18 @@ import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.Table; -import org.apache.cloudstack.api.InternalIdentity; +import org.apache.cloudstack.api.ResourceDetail; @Entity @Table(name = "domain_details") -public class DomainDetailVO implements InternalIdentity { +public class DomainDetailVO implements ResourceDetail { @Id @GeneratedValue(strategy = GenerationType.IDENTITY) @Column(name = "id") private long id; @Column(name = "domain_id") - private long domainId; + private long resourceId; @Column(name = "name") private String name; @@ -46,13 +46,14 @@ public class DomainDetailVO implements InternalIdentity { } public DomainDetailVO(long domainId, String name, String value) { - this.domainId = domainId; + this.resourceId = domainId; this.name = name; this.value = value; } - public long getDomainId() { - return domainId; + @Override + public long getResourceId() { + return resourceId; } public String getName() { @@ -63,6 +64,11 @@ public class DomainDetailVO implements InternalIdentity { return value; } + @Override + public boolean isDisplay() { + return true; + } + public void setValue(String value) { this.value = value; } diff --git a/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDao.java b/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDao.java index 6b53e49764e..ae149ff4381 100644 --- a/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDao.java +++ b/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDao.java @@ -20,8 +20,9 @@ import java.util.Map; import com.cloud.domain.DomainDetailVO; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDao; -public interface DomainDetailsDao extends GenericDao { +public interface DomainDetailsDao extends GenericDao, ResourceDetailsDao 
{ Map findDetails(long domainId); void persist(long domainId, Map details); @@ -31,6 +32,4 @@ public interface DomainDetailsDao extends GenericDao { void deleteDetails(long domainId); void update(long domainId, Map details); - - String getActualValue(DomainDetailVO domainDetailVO); } diff --git a/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDaoImpl.java index b9721a2e58c..5b4e4c591ff 100644 --- a/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDaoImpl.java @@ -25,19 +25,17 @@ import javax.inject.Inject; import org.apache.cloudstack.framework.config.ConfigKey.Scope; import org.apache.cloudstack.framework.config.ScopedConfigStorage; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.framework.config.impl.ConfigurationVO; import com.cloud.domain.DomainDetailVO; import com.cloud.domain.DomainVO; -import com.cloud.utils.crypt.DBEncryptionUtil; -import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.TransactionLegacy; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; -public class DomainDetailsDaoImpl extends GenericDaoBase implements DomainDetailsDao, ScopedConfigStorage { +public class DomainDetailsDaoImpl extends ResourceDetailsDaoBase implements DomainDetailsDao, ScopedConfigStorage { protected final SearchBuilder domainSearch; @Inject @@ -47,14 +45,14 @@ public class DomainDetailsDaoImpl extends GenericDaoBase i protected DomainDetailsDaoImpl() { domainSearch = createSearchBuilder(); - domainSearch.and("domainId", domainSearch.entity().getDomainId(), Op.EQ); + domainSearch.and("domainId", domainSearch.entity().getResourceId(), Op.EQ); 
domainSearch.done(); } @Override public Map findDetails(long domainId) { QueryBuilder sc = QueryBuilder.create(DomainDetailVO.class); - sc.and(sc.entity().getDomainId(), Op.EQ, domainId); + sc.and(sc.entity().getResourceId(), Op.EQ, domainId); List results = sc.list(); Map details = new HashMap(results.size()); for (DomainDetailVO r : results) { @@ -80,11 +78,16 @@ public class DomainDetailsDaoImpl extends GenericDaoBase i @Override public DomainDetailVO findDetail(long domainId, String name) { QueryBuilder sc = QueryBuilder.create(DomainDetailVO.class); - sc.and(sc.entity().getDomainId(), Op.EQ, domainId); + sc.and(sc.entity().getResourceId(), Op.EQ, domainId); sc.and(sc.entity().getName(), Op.EQ, name); return sc.find(); } + @Override + public void addDetail(long resourceId, String key, String value, boolean display) { + super.addDetail(new DomainDetailVO(resourceId, key, value)); + } + @Override public void deleteDetails(long domainId) { SearchCriteria sc = domainSearch.create(); @@ -129,13 +132,4 @@ public class DomainDetailsDaoImpl extends GenericDaoBase i } return vo == null ? 
null : getActualValue(vo); } - - @Override - public String getActualValue(DomainDetailVO domainDetailVO) { - ConfigurationVO configurationVO = _configDao.findByName(domainDetailVO.getName()); - if (configurationVO != null && configurationVO.isEncrypted()) { - return DBEncryptionUtil.decrypt(domainDetailVO.getValue()); - } - return domainDetailVO.getValue(); - } } diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java index abdf50ab399..cfd75b1a94b 100644 --- a/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java +++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java @@ -27,6 +27,7 @@ import com.cloud.hypervisor.Hypervisor; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.info.RunningHostCountInfo; import com.cloud.resource.ResourceState; +import com.cloud.utils.Pair; import com.cloud.utils.db.GenericDao; import com.cloud.utils.fsm.StateDao; @@ -39,8 +40,14 @@ public interface HostDao extends GenericDao, StateDao status); + Integer countAllByTypeInZone(long zoneId, final Host.Type type); + Integer countUpAndEnabledHostsInZone(long zoneId); + + Pair countAllHostsAndCPUSocketsByType(Type type); + /** * Mark all hosts associated with a certain management server * as disconnected. 
@@ -75,32 +82,41 @@ public interface HostDao extends GenericDao, StateDao findHypervisorHostInCluster(long clusterId); + HostVO findAnyStateHypervisorHostInCluster(long clusterId); + HostVO findOldestExistentHypervisorHostInCluster(long clusterId); List listAllUpAndEnabledNonHAHosts(Type type, Long clusterId, Long podId, long dcId, String haTag); List findByDataCenterId(Long zoneId); + List listIdsByDataCenterId(Long zoneId); + List findByPodId(Long podId); + List listIdsByPodId(Long podId); + List findByClusterId(Long clusterId); + List listIdsByClusterId(Long clusterId); + + List listIdsForUpRouting(Long zoneId, Long podId, Long clusterId); + + List listIdsByType(Type type); + + List listIdsForUpEnabledByZoneAndHypervisor(Long zoneId, HypervisorType hypervisorType); + List findByClusterIdAndEncryptionSupport(Long clusterId); /** - * Returns hosts that are 'Up' and 'Enabled' from the given Data Center/Zone + * Returns host Ids that are 'Up' and 'Enabled' from the given Data Center/Zone */ - List listByDataCenterId(long id); + List listEnabledIdsByDataCenterId(long id); /** - * Returns hosts that are from the given Data Center/Zone and at a given state (e.g. Creating, Enabled, Disabled, etc). 
+ * Returns host Ids that are 'Up' and 'Disabled' from the given Data Center/Zone */ - List listByDataCenterIdAndState(long id, ResourceState state); - - /** - * Returns hosts that are 'Up' and 'Disabled' from the given Data Center/Zone - */ - List listDisabledByDataCenterId(long id); + List listDisabledIdsByDataCenterId(long id); List listByDataCenterIdAndHypervisorType(long zoneId, Hypervisor.HypervisorType hypervisorType); @@ -110,8 +126,6 @@ public interface HostDao extends GenericDao, StateDao listAllHostsThatHaveNoRuleTag(Host.Type type, Long clusterId, Long podId, Long dcId); - List listAllHostsByType(Host.Type type); - HostVO findByPublicIp(String publicIp); List listClustersByHostTag(String hostTagOnOffering); @@ -182,4 +196,14 @@ public interface HostDao extends GenericDao, StateDao findClustersThatMatchHostTagRule(String computeOfferingTags); List listSsvmHostsWithPendingMigrateJobsOrderedByJobCount(); + + boolean isHostUp(long hostId); + + List findHostIdsByZoneClusterResourceStateTypeAndHypervisorType(final Long zoneId, final Long clusterId, + final List resourceStates, final List types, + final List hypervisorTypes); + + List listDistinctHypervisorTypes(final Long zoneId); + + List listByIds(final List ids); } diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java index 4e1be3ae0fb..54146e55049 100644 --- a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java @@ -20,6 +20,7 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Date; import java.util.HashMap; import java.util.HashSet; @@ -45,8 +46,8 @@ import com.cloud.dc.ClusterVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.gpu.dao.HostGpuGroupsDao; import com.cloud.gpu.dao.VGPUTypesDao; -import com.cloud.host.Host; 
import com.cloud.host.DetailVO; +import com.cloud.host.Host; import com.cloud.host.Host.Type; import com.cloud.host.HostTagVO; import com.cloud.host.HostVO; @@ -59,6 +60,8 @@ import com.cloud.org.Grouping; import com.cloud.org.Managed; import com.cloud.resource.ResourceState; import com.cloud.utils.DateUtil; +import com.cloud.utils.Pair; +import com.cloud.utils.StringUtils; import com.cloud.utils.db.Attribute; import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; @@ -74,19 +77,17 @@ import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.UpdateBuilder; import com.cloud.utils.exception.CloudRuntimeException; -import java.util.Arrays; - @DB @TableGenerator(name = "host_req_sq", table = "op_host", pkColumnName = "id", valueColumnName = "sequence", allocationSize = 1) public class HostDaoImpl extends GenericDaoBase implements HostDao { //FIXME: , ExternalIdDao { - private static final String LIST_HOST_IDS_BY_COMPUTETAGS = "SELECT filtered.host_id, COUNT(filtered.tag) AS tag_count " - + "FROM (SELECT host_id, tag, is_tag_a_rule FROM host_tags GROUP BY host_id,tag) AS filtered " - + "WHERE tag IN(%s) AND is_tag_a_rule = 0 " + private static final String LIST_HOST_IDS_BY_HOST_TAGS = "SELECT filtered.host_id, COUNT(filtered.tag) AS tag_count " + + "FROM (SELECT host_id, tag, is_tag_a_rule FROM host_tags GROUP BY host_id,tag,is_tag_a_rule) AS filtered " + + "WHERE tag IN (%s) AND (is_tag_a_rule = 0 OR is_tag_a_rule IS NULL) " + "GROUP BY host_id " + "HAVING tag_count = %s "; private static final String SEPARATOR = ","; - private static final String LIST_CLUSTERID_FOR_HOST_TAG = "select distinct cluster_id from host join ( %s ) AS selected_hosts ON host.id = selected_hosts.host_id"; + private static final String LIST_CLUSTER_IDS_FOR_HOST_TAGS = "select distinct cluster_id from host join ( %s ) AS selected_hosts ON host.id = selected_hosts.host_id"; private static final String GET_HOSTS_OF_ACTIVE_VMS = "select h.id " + "from vm_instance vm " + "join 
host h on (vm.host_id=h.id) " + @@ -98,6 +99,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao protected SearchBuilder TypePodDcStatusSearch; + protected SearchBuilder IdsSearch; protected SearchBuilder IdStatusSearch; protected SearchBuilder TypeDcSearch; protected SearchBuilder TypeDcStatusSearch; @@ -127,6 +129,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao protected SearchBuilder ResponsibleMsSearch; protected SearchBuilder ResponsibleMsDcSearch; protected GenericSearchBuilder ResponsibleMsIdSearch; + protected SearchBuilder HostTypeClusterCountSearch; protected SearchBuilder HostTypeZoneCountSearch; protected SearchBuilder ClusterStatusSearch; protected SearchBuilder TypeNameZoneSearch; @@ -138,8 +141,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao protected SearchBuilder ManagedRoutingServersSearch; protected SearchBuilder SecondaryStorageVMSearch; - protected GenericSearchBuilder HostIdSearch; - protected GenericSearchBuilder HostsInStatusSearch; + protected GenericSearchBuilder HostsInStatusesSearch; protected GenericSearchBuilder CountRoutingByDc; protected SearchBuilder HostTransferSearch; protected SearchBuilder ClusterManagedSearch; @@ -189,6 +191,8 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao HostTypeCountSearch = createSearchBuilder(); HostTypeCountSearch.and("type", HostTypeCountSearch.entity().getType(), SearchCriteria.Op.EQ); + HostTypeCountSearch.and("zoneId", HostTypeCountSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + HostTypeCountSearch.and("resourceState", HostTypeCountSearch.entity().getResourceState(), SearchCriteria.Op.EQ); HostTypeCountSearch.done(); ResponsibleMsSearch = createSearchBuilder(); @@ -205,6 +209,13 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao ResponsibleMsIdSearch.and("managementServerId", ResponsibleMsIdSearch.entity().getManagementServerId(), SearchCriteria.Op.EQ); 
ResponsibleMsIdSearch.done(); + HostTypeClusterCountSearch = createSearchBuilder(); + HostTypeClusterCountSearch.and("cluster", HostTypeClusterCountSearch.entity().getClusterId(), SearchCriteria.Op.EQ); + HostTypeClusterCountSearch.and("type", HostTypeClusterCountSearch.entity().getType(), SearchCriteria.Op.EQ); + HostTypeClusterCountSearch.and("status", HostTypeClusterCountSearch.entity().getStatus(), SearchCriteria.Op.IN); + HostTypeClusterCountSearch.and("removed", HostTypeClusterCountSearch.entity().getRemoved(), SearchCriteria.Op.NULL); + HostTypeClusterCountSearch.done(); + HostTypeZoneCountSearch = createSearchBuilder(); HostTypeZoneCountSearch.and("type", HostTypeZoneCountSearch.entity().getType(), SearchCriteria.Op.EQ); HostTypeZoneCountSearch.and("dc", HostTypeZoneCountSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); @@ -252,6 +263,10 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao TypeClusterStatusSearch.and("resourceState", TypeClusterStatusSearch.entity().getResourceState(), SearchCriteria.Op.EQ); TypeClusterStatusSearch.done(); + IdsSearch = createSearchBuilder(); + IdsSearch.and("id", IdsSearch.entity().getId(), SearchCriteria.Op.IN); + IdsSearch.done(); + IdStatusSearch = createSearchBuilder(); IdStatusSearch.and("id", IdStatusSearch.entity().getId(), SearchCriteria.Op.EQ); IdStatusSearch.and("states", IdStatusSearch.entity().getStatus(), SearchCriteria.Op.IN); @@ -398,14 +413,14 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao AvailHypevisorInZone.groupBy(AvailHypevisorInZone.entity().getHypervisorType()); AvailHypevisorInZone.done(); - HostsInStatusSearch = createSearchBuilder(Long.class); - HostsInStatusSearch.selectFields(HostsInStatusSearch.entity().getId()); - HostsInStatusSearch.and("dc", HostsInStatusSearch.entity().getDataCenterId(), Op.EQ); - HostsInStatusSearch.and("pod", HostsInStatusSearch.entity().getPodId(), Op.EQ); - HostsInStatusSearch.and("cluster", 
HostsInStatusSearch.entity().getClusterId(), Op.EQ); - HostsInStatusSearch.and("type", HostsInStatusSearch.entity().getType(), Op.EQ); - HostsInStatusSearch.and("statuses", HostsInStatusSearch.entity().getStatus(), Op.IN); - HostsInStatusSearch.done(); + HostsInStatusesSearch = createSearchBuilder(Long.class); + HostsInStatusesSearch.selectFields(HostsInStatusesSearch.entity().getId()); + HostsInStatusesSearch.and("dc", HostsInStatusesSearch.entity().getDataCenterId(), Op.EQ); + HostsInStatusesSearch.and("pod", HostsInStatusesSearch.entity().getPodId(), Op.EQ); + HostsInStatusesSearch.and("cluster", HostsInStatusesSearch.entity().getClusterId(), Op.EQ); + HostsInStatusesSearch.and("type", HostsInStatusesSearch.entity().getType(), Op.EQ); + HostsInStatusesSearch.and("statuses", HostsInStatusesSearch.entity().getStatus(), Op.IN); + HostsInStatusesSearch.done(); CountRoutingByDc = createSearchBuilder(Long.class); CountRoutingByDc.select(null, Func.COUNT, null); @@ -468,11 +483,6 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao HostsInClusterSearch.and("server", HostsInClusterSearch.entity().getManagementServerId(), SearchCriteria.Op.NNULL); HostsInClusterSearch.done(); - HostIdSearch = createSearchBuilder(Long.class); - HostIdSearch.selectFields(HostIdSearch.entity().getId()); - HostIdSearch.and("dataCenterId", HostIdSearch.entity().getDataCenterId(), Op.EQ); - HostIdSearch.done(); - searchBuilderFindByRuleTag = _hostTagsDao.createSearchBuilder(); searchBuilderFindByRuleTag.and("is_tag_a_rule", searchBuilderFindByRuleTag.entity().getIsTagARule(), Op.EQ); searchBuilderFindByRuleTag.or("tagDoesNotExist", searchBuilderFindByRuleTag.entity().getIsTagARule(), Op.NULL); @@ -504,8 +514,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao sc.setParameters("resourceState", (Object[])states); sc.setParameters("cluster", clusterId); - List hosts = listBy(sc); - return hosts.size(); + return getCount(sc); } @Override @@ -516,36 +525,62 @@ 
public class HostDaoImpl extends GenericDaoBase implements HostDao } @Override - public Integer countAllByTypeInZone(long zoneId, Type type) { - SearchCriteria sc = HostTypeCountSearch.create(); - sc.setParameters("type", type); - sc.setParameters("dc", zoneId); + public Integer countAllInClusterByTypeAndStates(Long clusterId, final Host.Type type, List status) { + SearchCriteria sc = HostTypeClusterCountSearch.create(); + if (clusterId != null) { + sc.setParameters("cluster", clusterId); + } + if (type != null) { + sc.setParameters("type", type); + } + if (status != null) { + sc.setParameters("status", status.toArray()); + } return getCount(sc); } @Override - public List listByDataCenterId(long id) { - return listByDataCenterIdAndState(id, ResourceState.Enabled); + public Integer countAllByTypeInZone(long zoneId, Type type) { + SearchCriteria sc = HostTypeCountSearch.create(); + sc.setParameters("type", type); + sc.setParameters("zoneId", zoneId); + return getCount(sc); } @Override - public List listByDataCenterIdAndState(long id, ResourceState state) { - SearchCriteria sc = scHostsFromZoneUpRouting(id); - sc.setParameters("resourceState", state); - return listBy(sc); + public Integer countUpAndEnabledHostsInZone(long zoneId) { + SearchCriteria sc = HostTypeCountSearch.create(); + sc.setParameters("type", Type.Routing); + sc.setParameters("resourceState", ResourceState.Enabled); + sc.setParameters("zoneId", zoneId); + return getCount(sc); } @Override - public List listDisabledByDataCenterId(long id) { - return listByDataCenterIdAndState(id, ResourceState.Disabled); + public Pair countAllHostsAndCPUSocketsByType(Type type) { + GenericSearchBuilder sb = createSearchBuilder(SumCount.class); + sb.select("sum", Func.SUM, sb.entity().getCpuSockets()); + sb.select("count", Func.COUNT, null); + sb.and("type", sb.entity().getType(), SearchCriteria.Op.EQ); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("type", type); + SumCount result = customSearch(sc, 
null).get(0); + return new Pair<>((int)result.count, (int)result.sum); } - private SearchCriteria scHostsFromZoneUpRouting(long id) { - SearchCriteria sc = DcSearch.create(); - sc.setParameters("dc", id); - sc.setParameters("status", Status.Up); - sc.setParameters("type", Host.Type.Routing); - return sc; + private List listIdsForRoutingByZoneIdAndResourceState(long zoneId, ResourceState state) { + return listIdsBy(Type.Routing, Status.Up, state, null, zoneId, null, null); + } + + @Override + public List listEnabledIdsByDataCenterId(long id) { + return listIdsForRoutingByZoneIdAndResourceState(id, ResourceState.Enabled); + } + + @Override + public List listDisabledIdsByDataCenterId(long id) { + return listIdsForRoutingByZoneIdAndResourceState(id, ResourceState.Disabled); } @Override @@ -603,9 +638,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao sb.append(" "); } - if (logger.isTraceEnabled()) { - logger.trace("Following hosts got reset: " + sb.toString()); - } + logger.trace("Following hosts got reset: {}", sb); } /* @@ -615,8 +648,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao SearchCriteria sc = ClustersOwnedByMSSearch.create(); sc.setParameters("server", managementServerId); - List clusters = customSearch(sc, null); - return clusters; + return customSearch(sc, null); } /* @@ -626,13 +658,11 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao SearchCriteria sc = ClustersForHostsNotOwnedByAnyMSSearch.create(); sc.setJoinParameters("ClusterManagedSearch", "managed", Managed.ManagedState.Managed); - List clusters = customSearch(sc, null); - return clusters; + return customSearch(sc, null); } /** * This determines if hosts belonging to cluster(@clusterId) are up for grabs - * * This is used for handling following cases: * 1. First host added in cluster * 2. 
During MS restart all hosts in a cluster are without any MS @@ -642,9 +672,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao sc.setParameters("cluster", clusterId); List hosts = search(sc, null); - boolean ownCluster = (hosts == null || hosts.size() == 0); - - return ownCluster; + return (hosts == null || hosts.isEmpty()); } @Override @@ -661,14 +689,14 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao logger.debug("Completed resetting hosts suitable for reconnect"); } - List assignedHosts = new ArrayList(); + List assignedHosts = new ArrayList<>(); if (logger.isDebugEnabled()) { logger.debug("Acquiring hosts for clusters already owned by this management server"); } List clusters = findClustersOwnedByManagementServer(managementServerId); txn.start(); - if (clusters.size() > 0) { + if (!clusters.isEmpty()) { // handle clusters already owned by @managementServerId SearchCriteria sc = UnmanagedDirectConnectSearch.create(); sc.setParameters("lastPinged", lastPingSecondsAfter); @@ -683,13 +711,9 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao sb.append(host.getId()); sb.append(" "); } - if (logger.isTraceEnabled()) { - logger.trace("Following hosts got acquired for clusters already owned: " + sb.toString()); - } - } - if (logger.isDebugEnabled()) { - logger.debug("Completed acquiring hosts for clusters already owned by this management server"); + logger.trace("Following hosts got acquired for clusters already owned: {}", sb); } + logger.debug("Completed acquiring hosts for clusters already owned by this management server"); if (assignedHosts.size() < limit) { if (logger.isDebugEnabled()) { @@ -701,7 +725,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao if (clusters.size() > limit) { updatedClusters = clusters.subList(0, limit.intValue()); } - if (updatedClusters.size() > 0) { + if (!updatedClusters.isEmpty()) { SearchCriteria sc = UnmanagedDirectConnectSearch.create(); 
sc.setParameters("lastPinged", lastPingSecondsAfter); sc.setJoinParameters("ClusterManagedSearch", "managed", Managed.ManagedState.Managed); @@ -709,10 +733,10 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao List unmanagedHosts = lockRows(sc, null, true); // group hosts based on cluster - Map> hostMap = new HashMap>(); + Map> hostMap = new HashMap<>(); for (HostVO host : unmanagedHosts) { if (hostMap.get(host.getClusterId()) == null) { - hostMap.put(host.getClusterId(), new ArrayList()); + hostMap.put(host.getClusterId(), new ArrayList<>()); } hostMap.get(host.getClusterId()).add(host); } @@ -733,13 +757,9 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao break; } } - if (logger.isTraceEnabled()) { - logger.trace("Following hosts got acquired from newly owned clusters: " + sb.toString()); - } - } - if (logger.isDebugEnabled()) { - logger.debug("Completed acquiring hosts for clusters not owned by any management server"); + logger.trace("Following hosts got acquired from newly owned clusters: {}", sb); } + logger.debug("Completed acquiring hosts for clusters not owned by any management server"); } txn.commit(); @@ -794,6 +814,15 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao @Override public List listByHostTag(Host.Type type, Long clusterId, Long podId, Long dcId, String hostTag) { + return listHostsWithOrWithoutHostTags(type, clusterId, podId, dcId, hostTag, true); + } + + private List listHostsWithOrWithoutHostTags(Host.Type type, Long clusterId, Long podId, Long dcId, String hostTags, boolean withHostTags) { + if (StringUtils.isEmpty(hostTags)) { + logger.debug("Host tags not specified, to list hosts"); + return new ArrayList<>(); + } + SearchBuilder hostSearch = createSearchBuilder(); HostVO entity = hostSearch.entity(); hostSearch.and("type", entity.getType(), SearchCriteria.Op.EQ); @@ -804,7 +833,9 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao 
hostSearch.and("resourceState", entity.getResourceState(), SearchCriteria.Op.EQ); SearchCriteria sc = hostSearch.create(); - sc.setParameters("type", type.toString()); + if (type != null) { + sc.setParameters("type", type.toString()); + } if (podId != null) { sc.setParameters("pod", podId); } @@ -817,27 +848,38 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao sc.setParameters("status", Status.Up.toString()); sc.setParameters("resourceState", ResourceState.Enabled.toString()); - List tmpHosts = listBy(sc); - List correctHostsByHostTags = new ArrayList(); - List hostIdsByComputeOffTags = findHostByComputeOfferings(hostTag); + List upAndEnabledHosts = listBy(sc); + if (CollectionUtils.isEmpty(upAndEnabledHosts)) { + return new ArrayList<>(); + } - tmpHosts.forEach((host) -> { if(hostIdsByComputeOffTags.contains(host.getId())) correctHostsByHostTags.add(host);}); + List hostIdsByHostTags = findHostIdsByHostTags(hostTags); + if (CollectionUtils.isEmpty(hostIdsByHostTags)) { + return withHostTags ? 
new ArrayList<>() : upAndEnabledHosts; + } - return correctHostsByHostTags; + if (withHostTags) { + List upAndEnabledHostsWithHostTags = new ArrayList<>(); + upAndEnabledHosts.forEach((host) -> { if (hostIdsByHostTags.contains(host.getId())) upAndEnabledHostsWithHostTags.add(host);}); + return upAndEnabledHostsWithHostTags; + } else { + List upAndEnabledHostsWithoutHostTags = new ArrayList<>(); + upAndEnabledHosts.forEach((host) -> { if (!hostIdsByHostTags.contains(host.getId())) upAndEnabledHostsWithoutHostTags.add(host);}); + return upAndEnabledHostsWithoutHostTags; + } } @Override public List listAllUpAndEnabledNonHAHosts(Type type, Long clusterId, Long podId, long dcId, String haTag) { + if (StringUtils.isNotEmpty(haTag)) { + return listHostsWithOrWithoutHostTags(type, clusterId, podId, dcId, haTag, false); + } + SearchBuilder hostTagSearch = _hostTagsDao.createSearchBuilder(); hostTagSearch.and(); hostTagSearch.op("isTagARule", hostTagSearch.entity().getIsTagARule(), Op.EQ); hostTagSearch.or("tagDoesNotExist", hostTagSearch.entity().getIsTagARule(), Op.NULL); hostTagSearch.cp(); - if (haTag != null && !haTag.isEmpty()) { - hostTagSearch.and().op("tag", hostTagSearch.entity().getTag(), SearchCriteria.Op.NEQ); - hostTagSearch.or("tagNull", hostTagSearch.entity().getTag(), SearchCriteria.Op.NULL); - hostTagSearch.cp(); - } SearchBuilder hostSearch = createSearchBuilder(); @@ -848,18 +890,12 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao hostSearch.and("status", hostSearch.entity().getStatus(), SearchCriteria.Op.EQ); hostSearch.and("resourceState", hostSearch.entity().getResourceState(), SearchCriteria.Op.EQ); - hostSearch.join("hostTagSearch", hostTagSearch, hostSearch.entity().getId(), hostTagSearch.entity().getHostId(), JoinBuilder.JoinType.LEFTOUTER); - SearchCriteria sc = hostSearch.create(); sc.setJoinParameters("hostTagSearch", "isTagARule", false); - if (haTag != null && !haTag.isEmpty()) { - sc.setJoinParameters("hostTagSearch", 
"tag", haTag); - } - if (type != null) { sc.setParameters("type", type); } @@ -899,12 +935,12 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao @DB @Override public List findLostHosts(long timeout) { - List result = new ArrayList(); + List result = new ArrayList<>(); String sql = "select h.id from host h left join cluster c on h.cluster_id=c.id where h.mgmt_server_id is not null and h.last_ping < ? and h.status in ('Up', 'Updating', 'Disconnected', 'Connecting') and h.type not in ('ExternalFirewall', 'ExternalLoadBalancer', 'TrafficMonitor', 'SecondaryStorage', 'LocalSecondaryStorage', 'L2Networking') and (h.cluster_id is null or c.managed_state = 'Managed') ;"; try (TransactionLegacy txn = TransactionLegacy.currentTxn(); - PreparedStatement pstmt = txn.prepareStatement(sql);) { + PreparedStatement pstmt = txn.prepareStatement(sql)) { pstmt.setLong(1, timeout); - try (ResultSet rs = pstmt.executeQuery();) { + try (ResultSet rs = pstmt.executeQuery()) { while (rs.next()) { long id = rs.getLong(1); //ID column result.add(findById(id)); @@ -937,7 +973,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao HashMap> groupDetails = host.getGpuGroupDetails(); if (groupDetails != null) { // Create/Update GPU group entries - _hostGpuGroupsDao.persist(host.getId(), new ArrayList(groupDetails.keySet())); + _hostGpuGroupsDao.persist(host.getId(), new ArrayList<>(groupDetails.keySet())); // Create/Update VGPU types entries _vgpuTypesDao.persist(host.getId(), groupDetails); } @@ -980,7 +1016,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao boolean persisted = super.update(hostId, host); if (!persisted) { - return persisted; + return false; } saveDetails(host); @@ -989,7 +1025,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao txn.commit(); - return persisted; + return true; } @Override @@ -1000,11 +1036,10 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao + "select 
h.data_center_id, h.type, count(*) as count from host as h INNER JOIN mshost as m ON h.mgmt_server_id=m.msid " + "where h.status='Up' and h.type='Routing' and m.last_update > ? " + "group by h.data_center_id, h.type) as t " + "ORDER by t.data_center_id, t.type"; - ArrayList l = new ArrayList(); + ArrayList l = new ArrayList<>(); TransactionLegacy txn = TransactionLegacy.currentTxn(); - ; - PreparedStatement pstmt = null; + PreparedStatement pstmt; try { pstmt = txn.prepareAutoCloseStatement(sql); String gmtCutTime = DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime); @@ -1028,9 +1063,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao @Override public long getNextSequence(long hostId) { - if (logger.isTraceEnabled()) { - logger.trace("getNextSequence(), hostId: " + hostId); - } + logger.trace("getNextSequence(), hostId: {}", hostId); TableGenerator tg = _tgs.get("host_req_sq"); assert tg != null : "how can this be wrong!"; @@ -1099,31 +1132,30 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao HostVO ho = findById(host.getId()); assert ho != null : "How how how? : " + host.getId(); + // TODO handle this if(debug){}else{log.debug} it makes no sense if (logger.isDebugEnabled()) { - - StringBuilder str = new StringBuilder("Unable to update host for event:").append(event.toString()); - str.append(". Name=").append(host.getName()); - str.append("; New=[status=").append(newStatus.toString()).append(":msid=").append(newStatus.lostConnection() ? 
"null" : host.getManagementServerId()) - .append(":lastpinged=").append(host.getLastPinged()).append("]"); - str.append("; Old=[status=").append(oldStatus.toString()).append(":msid=").append(host.getManagementServerId()).append(":lastpinged=").append(oldPingTime) - .append("]"); - str.append("; DB=[status=").append(vo.getStatus().toString()).append(":msid=").append(vo.getManagementServerId()).append(":lastpinged=").append(vo.getLastPinged()) - .append(":old update count=").append(oldUpdateCount).append("]"); - logger.debug(str.toString()); + String str = "Unable to update host for event:" + event + + ". Name=" + host.getName() + + "; New=[status=" + newStatus + ":msid=" + (newStatus.lostConnection() ? "null" : host.getManagementServerId()) + + ":lastpinged=" + host.getLastPinged() + "]" + + "; Old=[status=" + oldStatus.toString() + ":msid=" + host.getManagementServerId() + ":lastpinged=" + oldPingTime + + "]" + + "; DB=[status=" + vo.getStatus().toString() + ":msid=" + vo.getManagementServerId() + ":lastpinged=" + vo.getLastPinged() + + ":old update count=" + oldUpdateCount + "]"; + logger.debug(str); } else { - StringBuilder msg = new StringBuilder("Agent status update: ["); - msg.append("id = " + host.getId()); - msg.append("; name = " + host.getName()); - msg.append("; old status = " + oldStatus); - msg.append("; event = " + event); - msg.append("; new status = " + newStatus); - msg.append("; old update count = " + oldUpdateCount); - msg.append("; new update count = " + newUpdateCount + "]"); - logger.debug(msg.toString()); + String msg = "Agent status update: [" + "id = " + host.getId() + + "; name = " + host.getName() + + "; old status = " + oldStatus + + "; event = " + event + + "; new status = " + newStatus + + "; old update count = " + oldUpdateCount + + "; new update count = " + newUpdateCount + "]"; + logger.debug(msg); } if (ho.getState() == newStatus) { - logger.debug("Host " + ho.getName() + " state has already been updated to " + newStatus); + 
logger.debug("Host {} state has already been updated to {}", ho.getName(), newStatus); return true; } } @@ -1149,25 +1181,24 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao int result = update(ub, sc, null); assert result <= 1 : "How can this update " + result + " rows? "; + // TODO handle this if(debug){}else{log.debug} it makes no sense if (logger.isDebugEnabled() && result == 0) { HostVO ho = findById(host.getId()); assert ho != null : "How how how? : " + host.getId(); - StringBuilder str = new StringBuilder("Unable to update resource state: ["); - str.append("m = " + host.getId()); - str.append("; name = " + host.getName()); - str.append("; old state = " + oldState); - str.append("; event = " + event); - str.append("; new state = " + newState + "]"); - logger.debug(str.toString()); + String str = "Unable to update resource state: [" + "m = " + host.getId() + + "; name = " + host.getName() + + "; old state = " + oldState + + "; event = " + event + + "; new state = " + newState + "]"; + logger.debug(str); } else { - StringBuilder msg = new StringBuilder("Resource state update: ["); - msg.append("id = " + host.getId()); - msg.append("; name = " + host.getName()); - msg.append("; old state = " + oldState); - msg.append("; event = " + event); - msg.append("; new state = " + newState + "]"); - logger.debug(msg.toString()); + String msg = "Resource state update: [" + "id = " + host.getId() + + "; name = " + host.getName() + + "; old state = " + oldState + + "; event = " + event + + "; new state = " + newState + "]"; + logger.debug(msg); } return result > 0; @@ -1190,6 +1221,11 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return listBy(sc); } + @Override + public List listIdsByDataCenterId(Long zoneId) { + return listIdsBy(Type.Routing, null, null, null, zoneId, null, null); + } + @Override public List findByPodId(Long podId) { SearchCriteria sc = PodSearch.create(); @@ -1197,6 +1233,11 @@ public class HostDaoImpl extends 
GenericDaoBase implements HostDao return listBy(sc); } + @Override + public List listIdsByPodId(Long podId) { + return listIdsBy(null, null, null, null, null, podId, null); + } + @Override public List findByClusterId(Long clusterId) { SearchCriteria sc = ClusterSearch.create(); @@ -1204,6 +1245,63 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return listBy(sc); } + protected List listIdsBy(Host.Type type, Status status, ResourceState resourceState, + HypervisorType hypervisorType, Long zoneId, Long podId, Long clusterId) { + GenericSearchBuilder sb = createSearchBuilder(Long.class); + sb.selectFields(sb.entity().getId()); + sb.and("type", sb.entity().getType(), SearchCriteria.Op.EQ); + sb.and("status", sb.entity().getStatus(), SearchCriteria.Op.EQ); + sb.and("resourceState", sb.entity().getResourceState(), SearchCriteria.Op.EQ); + sb.and("hypervisorType", sb.entity().getHypervisorType(), SearchCriteria.Op.EQ); + sb.and("zoneId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); + sb.and("podId", sb.entity().getPodId(), SearchCriteria.Op.EQ); + sb.and("clusterId", sb.entity().getClusterId(), SearchCriteria.Op.EQ); + sb.done(); + SearchCriteria sc = sb.create(); + if (type != null) { + sc.setParameters("type", type); + } + if (status != null) { + sc.setParameters("status", status); + } + if (resourceState != null) { + sc.setParameters("resourceState", resourceState); + } + if (hypervisorType != null) { + sc.setParameters("hypervisorType", hypervisorType); + } + if (zoneId != null) { + sc.setParameters("zoneId", zoneId); + } + if (podId != null) { + sc.setParameters("podId", podId); + } + if (clusterId != null) { + sc.setParameters("clusterId", clusterId); + } + return customSearch(sc, null); + } + + @Override + public List listIdsByClusterId(Long clusterId) { + return listIdsBy(null, null, null, null, null, null, clusterId); + } + + @Override + public List listIdsForUpRouting(Long zoneId, Long podId, Long clusterId) { + return 
listIdsBy(Type.Routing, Status.Up, null, null, zoneId, podId, clusterId); + } + + @Override + public List listIdsByType(Type type) { + return listIdsBy(type, null, null, null, null, null, null); + } + + @Override + public List listIdsForUpEnabledByZoneAndHypervisor(Long zoneId, HypervisorType hypervisorType) { + return listIdsBy(null, Status.Up, ResourceState.Enabled, hypervisorType, zoneId, null, null); + } + @Override public List findByClusterIdAndEncryptionSupport(Long clusterId) { SearchBuilder hostCapabilitySearch = _detailsDao.createSearchBuilder(); @@ -1256,6 +1354,15 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return listBy(sc); } + @Override + public HostVO findAnyStateHypervisorHostInCluster(long clusterId) { + SearchCriteria sc = TypeClusterStatusSearch.create(); + sc.setParameters("type", Host.Type.Routing); + sc.setParameters("cluster", clusterId); + List list = listBy(sc, new Filter(1)); + return list.isEmpty() ? null : list.get(0); + } + @Override public HostVO findOldestExistentHypervisorHostInCluster(long clusterId) { SearchCriteria sc = TypeClusterStatusSearch.create(); @@ -1266,7 +1373,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao Filter orderByFilter = new Filter(HostVO.class, "created", true, null, null); List hosts = search(sc, orderByFilter, null, false); - if (hosts != null && hosts.size() > 0) { + if (hosts != null && !hosts.isEmpty()) { return hosts.get(0); } @@ -1275,9 +1382,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao @Override public List listAllHosts(long zoneId) { - SearchCriteria sc = HostIdSearch.create(); - sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, zoneId); - return customSearch(sc, null); + return listIdsBy(null, null, null, null, zoneId, null, null); } @Override @@ -1311,19 +1416,19 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao } @Override - public List listClustersByHostTag(String computeOfferingTags) { + public List 
listClustersByHostTag(String hostTags) { TransactionLegacy txn = TransactionLegacy.currentTxn(); - String sql = this.LIST_CLUSTERID_FOR_HOST_TAG; - PreparedStatement pstmt = null; - List result = new ArrayList(); - List tags = Arrays.asList(computeOfferingTags.split(this.SEPARATOR)); - String subselect = getHostIdsByComputeTags(tags); - sql = String.format(sql, subselect); + String selectStmtToListClusterIdsByHostTags = LIST_CLUSTER_IDS_FOR_HOST_TAGS; + PreparedStatement pstmt; + List result = new ArrayList<>(); + List tags = Arrays.asList(hostTags.split(SEPARATOR)); + String selectStmtToListHostIdsByHostTags = getSelectStmtToListHostIdsByHostTags(tags); + selectStmtToListClusterIdsByHostTags = String.format(selectStmtToListClusterIdsByHostTags, selectStmtToListHostIdsByHostTags); try { - pstmt = txn.prepareStatement(sql); + pstmt = txn.prepareStatement(selectStmtToListClusterIdsByHostTags); - for(int i = 0; i < tags.size(); i++){ + for (int i = 0; i < tags.size(); i++){ pstmt.setString(i+1, tags.get(i)); } @@ -1334,20 +1439,20 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao pstmt.close(); return result; } catch (SQLException e) { - throw new CloudRuntimeException("DB Exception on: " + sql, e); + throw new CloudRuntimeException("DB Exception on: " + selectStmtToListClusterIdsByHostTags, e); } } - private List findHostByComputeOfferings(String computeOfferingTags){ + private List findHostIdsByHostTags(String hostTags){ TransactionLegacy txn = TransactionLegacy.currentTxn(); - PreparedStatement pstmt = null; - List result = new ArrayList(); - List tags = Arrays.asList(computeOfferingTags.split(this.SEPARATOR)); - String select = getHostIdsByComputeTags(tags); + PreparedStatement pstmt; + List result = new ArrayList<>(); + List tags = Arrays.asList(hostTags.split(SEPARATOR)); + String selectStmtToListHostIdsByHostTags = getSelectStmtToListHostIdsByHostTags(tags); try { - pstmt = txn.prepareStatement(select); + pstmt = 
txn.prepareStatement(selectStmtToListHostIdsByHostTags); - for(int i = 0; i < tags.size(); i++){ + for (int i = 0; i < tags.size(); i++){ pstmt.setString(i+1, tags.get(i)); } @@ -1358,7 +1463,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao pstmt.close(); return result; } catch (SQLException e) { - throw new CloudRuntimeException("DB Exception on: " + select, e); + throw new CloudRuntimeException("DB Exception on: " + selectStmtToListHostIdsByHostTags, e); } } @@ -1408,16 +1513,16 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return result; } - private String getHostIdsByComputeTags(List offeringTags){ - List questionMarks = new ArrayList(); - offeringTags.forEach((tag) -> { questionMarks.add("?"); }); - return String.format(this.LIST_HOST_IDS_BY_COMPUTETAGS, String.join(",", questionMarks),questionMarks.size()); + private String getSelectStmtToListHostIdsByHostTags(List hostTags){ + List questionMarks = new ArrayList<>(); + hostTags.forEach((tag) -> questionMarks.add("?")); + return String.format(LIST_HOST_IDS_BY_HOST_TAGS, String.join(SEPARATOR, questionMarks), questionMarks.size()); } @Override public List listHostsWithActiveVMs(long offeringId) { TransactionLegacy txn = TransactionLegacy.currentTxn(); - PreparedStatement pstmt = null; + PreparedStatement pstmt; List result = new ArrayList<>(); StringBuilder sql = new StringBuilder(GET_HOSTS_OF_ACTIVE_VMS); try { @@ -1466,7 +1571,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao @Override public List listOrderedHostsHypervisorVersionsInDatacenter(long datacenterId, HypervisorType hypervisorType) { - PreparedStatement pstmt = null; + PreparedStatement pstmt; List result = new ArrayList<>(); try { TransactionLegacy txn = TransactionLegacy.currentTxn(); @@ -1483,15 +1588,6 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return result; } - @Override - public List listAllHostsByType(Host.Type type) { - SearchCriteria sc = 
TypeSearch.create(); - sc.setParameters("type", type); - sc.setParameters("resourceState", ResourceState.Enabled); - - return listBy(sc); - } - @Override public List listByType(Host.Type type) { SearchCriteria sc = TypeSearch.create(); @@ -1636,4 +1732,71 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao } return String.format(sqlFindHostInZoneToExecuteCommand, hostResourceStatus); } + + @Override + public boolean isHostUp(long hostId) { + GenericSearchBuilder sb = createSearchBuilder(Status.class); + sb.and("id", sb.entity().getId(), Op.EQ); + sb.selectFields(sb.entity().getStatus()); + SearchCriteria sc = sb.create(); + sc.setParameters("id", hostId); + List statuses = customSearch(sc, null); + return CollectionUtils.isNotEmpty(statuses) && Status.Up.equals(statuses.get(0)); + } + + @Override + public List findHostIdsByZoneClusterResourceStateTypeAndHypervisorType(final Long zoneId, final Long clusterId, + final List resourceStates, final List types, + final List hypervisorTypes) { + GenericSearchBuilder sb = createSearchBuilder(Long.class); + sb.selectFields(sb.entity().getId()); + sb.and("zoneId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); + sb.and("clusterId", sb.entity().getClusterId(), SearchCriteria.Op.EQ); + sb.and("resourceState", sb.entity().getResourceState(), SearchCriteria.Op.IN); + sb.and("type", sb.entity().getType(), SearchCriteria.Op.IN); + if (CollectionUtils.isNotEmpty(hypervisorTypes)) { + sb.and().op(sb.entity().getHypervisorType(), SearchCriteria.Op.NULL); + sb.or("hypervisorTypes", sb.entity().getHypervisorType(), SearchCriteria.Op.IN); + sb.cp(); + } + sb.done(); + SearchCriteria sc = sb.create(); + if (zoneId != null) { + sc.setParameters("zoneId", zoneId); + } + if (clusterId != null) { + sc.setParameters("clusterId", clusterId); + } + if (CollectionUtils.isNotEmpty(hypervisorTypes)) { + sc.setParameters("hypervisorTypes", hypervisorTypes.toArray()); + } + sc.setParameters("resourceState", 
resourceStates.toArray()); + sc.setParameters("type", types.toArray()); + return customSearch(sc, null); + } + + @Override + public List listDistinctHypervisorTypes(final Long zoneId) { + GenericSearchBuilder sb = createSearchBuilder(HypervisorType.class); + sb.and("zoneId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); + sb.and("type", sb.entity().getType(), SearchCriteria.Op.EQ); + sb.select(null, Func.DISTINCT, sb.entity().getHypervisorType()); + sb.done(); + SearchCriteria sc = sb.create(); + if (zoneId != null) { + sc.setParameters("zoneId", zoneId); + } + sc.setParameters("type", Type.Routing); + return customSearch(sc, null); + } + + @Override + public List listByIds(List ids) { + if (CollectionUtils.isEmpty(ids)) { + return new ArrayList<>(); + } + SearchCriteria sc = IdsSearch.create(); + sc.setParameters("id", ids.toArray()); + return search(sc, null); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDao.java b/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDao.java index b80ccd9cd1b..056445225d0 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDao.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDao.java @@ -43,7 +43,9 @@ public interface FirewallRulesDao extends GenericDao { List listStaticNatByVmId(long vmId); - List listByIpPurposeAndProtocolAndNotRevoked(long ipAddressId, Integer startPort, Integer endPort, String protocol, FirewallRule.Purpose purpose); + List listByIpPurposePortsProtocolAndNotRevoked(long ipAddressId, Integer startPort, Integer endPort, String protocol, FirewallRule.Purpose purpose); + + List listByIpPurposeProtocolAndNotRevoked(long ipAddressId, FirewallRule.Purpose purpose, String protocol); FirewallRuleVO findByRelatedId(long ruleId); diff --git a/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDaoImpl.java index 
c8bd7e2147e..a793a9172d4 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDaoImpl.java @@ -263,8 +263,25 @@ public class FirewallRulesDaoImpl extends GenericDaoBase i } @Override - public List listByIpPurposeAndProtocolAndNotRevoked(long ipAddressId, Integer startPort, Integer endPort, String protocol, - FirewallRule.Purpose purpose) { + public List listByIpPurposeProtocolAndNotRevoked(long ipAddressId, Purpose purpose, String protocol) { + SearchCriteria sc = NotRevokedSearch.create(); + sc.setParameters("ipId", ipAddressId); + sc.setParameters("state", State.Revoke); + + if (purpose != null) { + sc.setParameters("purpose", purpose); + } + + if (protocol != null) { + sc.setParameters("protocol", protocol); + } + + return listBy(sc); + } + + @Override + public List listByIpPurposePortsProtocolAndNotRevoked(long ipAddressId, Integer startPort, Integer endPort, String protocol, + FirewallRule.Purpose purpose) { SearchCriteria sc = NotRevokedSearch.create(); sc.setParameters("ipId", ipAddressId); sc.setParameters("state", State.Revoke); diff --git a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java index aa143838c34..5499d04e3a1 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java @@ -421,7 +421,7 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen public long countFreeIpsInVlan(long vlanDbId) { SearchCriteria sc = VlanDbIdSearchUnallocated.create(); sc.setParameters("vlanDbId", vlanDbId); - return listBy(sc).size(); + return getCount(sc); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java index fa448b026e4..0aae532eac5 100644 --- 
a/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java @@ -415,8 +415,7 @@ public class NetworkDaoImpl extends GenericDaoBaseimplements Ne sc.setParameters("broadcastUri", broadcastURI); sc.setParameters("guestType", guestTypes); sc.setJoinParameters("persistent", "persistent", isPersistent); - List persistentNetworks = search(sc, null); - return persistentNetworks.size(); + return getCount(sc); } @Override diff --git a/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java b/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java index a37acdf6029..8229c3a62fc 100644 --- a/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java @@ -55,8 +55,7 @@ public class CommandExecLogDaoImpl extends GenericDaoBase sc = CommandSearch.create(); sc.setParameters("host_id", id); sc.setParameters("command_name", "CopyCommand"); - List copyCmds = customSearch(sc, null); - return copyCmds.size(); + return getCount(sc); } @Override diff --git a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java index 48e63d8e2b5..ceb5b0a4fc1 100644 --- a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java +++ b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java @@ -54,7 +54,7 @@ public interface ServiceOfferingDao extends GenericDao List listPublicByCpuAndMemory(Integer cpus, Integer memory); - List listByHostTag(String tag); - ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(long diskOfferingId, boolean includingRemoved); + + List listIdsByHostTag(String tag); } diff --git a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java 
b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java index 706dcdc1b7b..803522fa6aa 100644 --- a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java @@ -34,6 +34,7 @@ import com.cloud.service.ServiceOfferingVO; import com.cloud.storage.Storage.ProvisioningType; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.exception.CloudRuntimeException; @@ -293,8 +294,9 @@ public class ServiceOfferingDaoImpl extends GenericDaoBase listByHostTag(String tag) { - SearchBuilder sb = createSearchBuilder(); + public List listIdsByHostTag(String tag) { + GenericSearchBuilder sb = createSearchBuilder(Long.class); + sb.selectFields(sb.entity().getId()); sb.and("tagNotNull", sb.entity().getHostTag(), SearchCriteria.Op.NNULL); sb.and().op("tagEq", sb.entity().getHostTag(), SearchCriteria.Op.EQ); sb.or("tagStartLike", sb.entity().getHostTag(), SearchCriteria.Op.LIKE); @@ -302,11 +304,12 @@ public class ServiceOfferingDaoImpl extends GenericDaoBase sc = sb.create(); + SearchCriteria sc = sb.create(); + sc.setParameters("tagEq", tag); sc.setParameters("tagStartLike", tag + ",%"); sc.setParameters("tagMidLike", "%," + tag + ",%"); sc.setParameters("tagEndLike", "%," + tag); - return listBy(sc); + return customSearch(sc, null); } } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/BucketDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/BucketDao.java index f45f28b5c2c..2511df49807 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/BucketDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/BucketDao.java @@ -27,4 +27,8 @@ public interface BucketDao extends GenericDao { List listByObjectStoreIdAndAccountId(long objectStoreId, long 
accountId); List searchByIds(Long[] ids); + + Long countBucketsForAccount(long accountId); + + Long calculateObjectStorageAllocationForAccount(long accountId); } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/BucketDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/BucketDaoImpl.java index 98bef6201a1..473879d933d 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/BucketDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/BucketDaoImpl.java @@ -16,8 +16,10 @@ // under the License. package com.cloud.storage.dao; +import com.cloud.configuration.Resource; import com.cloud.storage.BucketVO; import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import org.springframework.stereotype.Component; @@ -31,6 +33,8 @@ public class BucketDaoImpl extends GenericDaoBase implements Buc private SearchBuilder searchFilteringStoreId; private SearchBuilder bucketSearch; + private GenericSearchBuilder CountBucketsByAccount; + private GenericSearchBuilder CalculateBucketsQuotaByAccount; private static final String STORE_ID = "store_id"; private static final String STATE = "state"; @@ -54,6 +58,20 @@ public class BucketDaoImpl extends GenericDaoBase implements Buc bucketSearch.and("idIN", bucketSearch.entity().getId(), SearchCriteria.Op.IN); bucketSearch.done(); + CountBucketsByAccount = createSearchBuilder(Long.class); + CountBucketsByAccount.select(null, SearchCriteria.Func.COUNT, null); + CountBucketsByAccount.and(ACCOUNT_ID, CountBucketsByAccount.entity().getAccountId(), SearchCriteria.Op.EQ); + CountBucketsByAccount.and(STATE, CountBucketsByAccount.entity().getState(), SearchCriteria.Op.NIN); + CountBucketsByAccount.and("removed", CountBucketsByAccount.entity().getRemoved(), SearchCriteria.Op.NULL); + CountBucketsByAccount.done(); + + CalculateBucketsQuotaByAccount = createSearchBuilder(SumCount.class); + 
CalculateBucketsQuotaByAccount.select("sum", SearchCriteria.Func.SUM, CalculateBucketsQuotaByAccount.entity().getQuota()); + CalculateBucketsQuotaByAccount.and(ACCOUNT_ID, CalculateBucketsQuotaByAccount.entity().getAccountId(), SearchCriteria.Op.EQ); + CalculateBucketsQuotaByAccount.and(STATE, CalculateBucketsQuotaByAccount.entity().getState(), SearchCriteria.Op.NIN); + CalculateBucketsQuotaByAccount.and("removed", CalculateBucketsQuotaByAccount.entity().getRemoved(), SearchCriteria.Op.NULL); + CalculateBucketsQuotaByAccount.done(); + return true; } @Override @@ -79,4 +97,21 @@ public class BucketDaoImpl extends GenericDaoBase implements Buc sc.setParameters("idIN", ids); return search(sc, null, null, false); } + + @Override + public Long countBucketsForAccount(long accountId) { + SearchCriteria sc = CountBucketsByAccount.create(); + sc.setParameters(ACCOUNT_ID, accountId); + sc.setParameters(STATE, BucketVO.State.Destroyed); + return customSearch(sc, null).get(0); + } + + @Override + public Long calculateObjectStorageAllocationForAccount(long accountId) { + SearchCriteria sc = CalculateBucketsQuotaByAccount.create(); + sc.setParameters(ACCOUNT_ID, accountId); + sc.setParameters(STATE, BucketVO.State.Destroyed); + Long totalQuota = customSearch(sc, null).get(0).sum; + return (totalQuota * Resource.ResourceType.bytesToGiB); + } } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolDetailsDaoImpl.java index 376933f92e7..a3baa3b4cb0 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolDetailsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolDetailsDaoImpl.java @@ -30,6 +30,8 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import com.cloud.utils.Pair; + 
public class StoragePoolDetailsDaoImpl extends ResourceDetailsDaoBase implements StoragePoolDetailsDao, ScopedConfigStorage { @Inject @@ -46,7 +48,7 @@ public class StoragePoolDetailsDaoImpl extends ResourceDetailsDaoBase getParentScope(long id) { + StoragePoolVO pool = _storagePoolDao.findById(id); + if (pool != null) { + if (pool.getClusterId() != null) { + return new Pair<>(getScope().getParent(), pool.getClusterId()); + } else { + return new Pair<>(ConfigKey.Scope.Zone, pool.getDataCenterId()); + } + } + return null; + } } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java index 62ef5b7570d..639c2571541 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java @@ -34,7 +34,7 @@ public interface StoragePoolHostDao extends GenericDao List findHostsConnectedToPools(List poolIds); - List> getDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly); + boolean hasDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly); public void deletePrimaryRecordsForHost(long hostId); diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java index 987a42f410e..5a466af348c 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java @@ -55,11 +55,11 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase> getDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly) { - ArrayList> l = new ArrayList>(); + public boolean hasDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly) { + Long poolCount = 0L; String sql = sharedOnly ? 
SHARED_STORAGE_POOL_HOST_INFO : STORAGE_POOL_HOST_INFO; TransactionLegacy txn = TransactionLegacy.currentTxn(); - PreparedStatement pstmt = null; - try { - pstmt = txn.prepareAutoCloseStatement(sql); + try (PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql)) { pstmt.setLong(1, dcId); - ResultSet rs = pstmt.executeQuery(); while (rs.next()) { - l.add(new Pair(rs.getLong(1), rs.getInt(2))); + poolCount = rs.getLong(1); + if (poolCount > 0) { + return true; + } } } catch (SQLException e) { logger.debug("SQLException: ", e); } - return l; + return false; } /** diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java index 1c5a2cb4256..3ac514530ce 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java @@ -67,6 +67,8 @@ public interface VMTemplateDao extends GenericDao, StateDao< public List userIsoSearch(boolean listRemoved); + List listAllReadySystemVMTemplates(Long zoneId); + VMTemplateVO findSystemVMTemplate(long zoneId); VMTemplateVO findSystemVMReadyTemplate(long zoneId, HypervisorType hypervisorType); @@ -91,6 +93,5 @@ public interface VMTemplateDao extends GenericDao, StateDao< List listByIds(List ids); - List listByTemplateTag(String tag); - + List listIdsByTemplateTag(String tag); } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java index 4665f660251..7513848536b 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java @@ -344,19 +344,12 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem readySystemTemplateSearch = createSearchBuilder(); readySystemTemplateSearch.and("state", readySystemTemplateSearch.entity().getState(), 
SearchCriteria.Op.EQ); readySystemTemplateSearch.and("templateType", readySystemTemplateSearch.entity().getTemplateType(), SearchCriteria.Op.EQ); + readySystemTemplateSearch.and("hypervisorType", readySystemTemplateSearch.entity().getHypervisorType(), SearchCriteria.Op.IN); SearchBuilder templateDownloadSearch = _templateDataStoreDao.createSearchBuilder(); templateDownloadSearch.and("downloadState", templateDownloadSearch.entity().getDownloadState(), SearchCriteria.Op.IN); readySystemTemplateSearch.join("vmTemplateJoinTemplateStoreRef", templateDownloadSearch, templateDownloadSearch.entity().getTemplateId(), readySystemTemplateSearch.entity().getId(), JoinBuilder.JoinType.INNER); - SearchBuilder hostHyperSearch2 = _hostDao.createSearchBuilder(); - hostHyperSearch2.and("type", hostHyperSearch2.entity().getType(), SearchCriteria.Op.EQ); - hostHyperSearch2.and("zoneId", hostHyperSearch2.entity().getDataCenterId(), SearchCriteria.Op.EQ); - hostHyperSearch2.and("removed", hostHyperSearch2.entity().getRemoved(), SearchCriteria.Op.NULL); - hostHyperSearch2.groupBy(hostHyperSearch2.entity().getHypervisorType()); - - readySystemTemplateSearch.join("tmplHyper", hostHyperSearch2, hostHyperSearch2.entity().getHypervisorType(), readySystemTemplateSearch.entity() - .getHypervisorType(), JoinBuilder.JoinType.INNER); - hostHyperSearch2.done(); + readySystemTemplateSearch.groupBy(readySystemTemplateSearch.entity().getId()); readySystemTemplateSearch.done(); tmpltTypeHyperSearch2 = createSearchBuilder(); @@ -556,29 +549,35 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem } @Override - public VMTemplateVO findSystemVMReadyTemplate(long zoneId, HypervisorType hypervisorType) { + public List listAllReadySystemVMTemplates(Long zoneId) { + List availableHypervisors = _hostDao.listDistinctHypervisorTypes(zoneId); + if (CollectionUtils.isEmpty(availableHypervisors)) { + return Collections.emptyList(); + } SearchCriteria sc = readySystemTemplateSearch.create(); 
sc.setParameters("templateType", Storage.TemplateType.SYSTEM); sc.setParameters("state", VirtualMachineTemplate.State.Active); - sc.setJoinParameters("tmplHyper", "type", Host.Type.Routing); - sc.setJoinParameters("tmplHyper", "zoneId", zoneId); - sc.setJoinParameters("vmTemplateJoinTemplateStoreRef", "downloadState", new VMTemplateStorageResourceAssoc.Status[] {VMTemplateStorageResourceAssoc.Status.DOWNLOADED, VMTemplateStorageResourceAssoc.Status.BYPASSED}); - + sc.setParameters("hypervisorType", availableHypervisors.toArray()); + sc.setJoinParameters("vmTemplateJoinTemplateStoreRef", "downloadState", + List.of(VMTemplateStorageResourceAssoc.Status.DOWNLOADED, + VMTemplateStorageResourceAssoc.Status.BYPASSED).toArray()); // order by descending order of id - List tmplts = listBy(sc, new Filter(VMTemplateVO.class, "id", false, null, null)); - - if (tmplts.size() > 0) { - if (hypervisorType == HypervisorType.Any) { - return tmplts.get(0); - } - for (VMTemplateVO tmplt : tmplts) { - if (tmplt.getHypervisorType() == hypervisorType) { - return tmplt; - } - } + return listBy(sc, new Filter(VMTemplateVO.class, "id", false, null, null)); + } + @Override + public VMTemplateVO findSystemVMReadyTemplate(long zoneId, HypervisorType hypervisorType) { + List templates = listAllReadySystemVMTemplates(zoneId); + if (CollectionUtils.isEmpty(templates)) { + return null; } - return null; + if (hypervisorType == HypervisorType.Any) { + return templates.get(0); + } + return templates.stream() + .filter(t -> t.getHypervisorType() == hypervisorType) + .findFirst() + .orElse(null); } @Override @@ -687,13 +686,14 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem } @Override - public List listByTemplateTag(String tag) { - SearchBuilder sb = createSearchBuilder(); + public List listIdsByTemplateTag(String tag) { + GenericSearchBuilder sb = createSearchBuilder(Long.class); + sb.selectFields(sb.entity().getId()); sb.and("tag", sb.entity().getTemplateTag(), SearchCriteria.Op.EQ); 
sb.done(); - SearchCriteria sc = sb.create(); + SearchCriteria sc = sb.create(); sc.setParameters("tag", tag); - return listIncludingRemovedBy(sc); + return customSearchIncludingRemoved(sc, null); } @Override diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java index 0c4d707635a..750dbf2bee0 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java @@ -571,14 +571,6 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol } } - public static class SumCount { - public long sum; - public long count; - - public SumCount() { - } - } - @Override public List listVolumesToBeDestroyed() { SearchCriteria sc = AllFieldsSearch.create(); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/ConfigurationGroupsAggregator.java b/engine/schema/src/main/java/com/cloud/upgrade/ConfigurationGroupsAggregator.java index 03857137ded..5c1a7504692 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/ConfigurationGroupsAggregator.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/ConfigurationGroupsAggregator.java @@ -54,7 +54,7 @@ public class ConfigurationGroupsAggregator { public void updateConfigurationGroups() { LOG.debug("Updating configuration groups"); - List configs = configDao.listAllIncludingRemoved(); + List configs = configDao.searchPartialConfigurations(); if (CollectionUtils.isEmpty(configs)) { return; } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index b197fb7c030..9b0e10cbbfc 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -334,7 +334,7 @@ public class SystemVmTemplateRegistration { } }; - 
public static boolean validateIfSeeded(String url, String path, String nfsVersion) { + public boolean validateIfSeeded(TemplateDataStoreVO templDataStoreVO, String url, String path, String nfsVersion) { String filePath = null; try { filePath = Files.createTempDirectory(TEMPORARY_SECONDARY_STORE).toString(); @@ -347,6 +347,9 @@ public class SystemVmTemplateRegistration { String templatePath = filePath + File.separator + partialDirPath; File templateProps = new File(templatePath + "/template.properties"); if (templateProps.exists()) { + Pair templateSizes = readTemplatePropertiesSizes(templatePath + "/template.properties"); + updateSeededTemplateDetails(templDataStoreVO.getTemplateId(), templDataStoreVO.getDataStoreId(), + templateSizes.first(), templateSizes.second()); LOGGER.info("SystemVM template already seeded, skipping registration"); return true; } @@ -542,6 +545,21 @@ public class SystemVmTemplateRegistration { } } + public void updateSeededTemplateDetails(long templateId, long storeId, long size, long physicalSize) { + VMTemplateVO template = vmTemplateDao.findById(templateId); + template.setSize(size); + vmTemplateDao.update(template.getId(), template); + + TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByStoreTemplate(storeId, template.getId()); + templateDataStoreVO.setSize(size); + templateDataStoreVO.setPhysicalSize(physicalSize); + templateDataStoreVO.setLastUpdated(new Date(DateUtil.currentGMTTime().getTime())); + boolean updated = templateDataStoreDao.update(templateDataStoreVO.getId(), templateDataStoreVO); + if (!updated) { + throw new CloudRuntimeException("Failed to update template_store_ref entry for seeded systemVM template"); + } + } + public void updateSystemVMEntries(Long templateId, Hypervisor.HypervisorType hypervisorType) { vmInstanceDao.updateSystemVmTemplateId(templateId, hypervisorType); } @@ -555,7 +573,7 @@ public class SystemVmTemplateRegistration { } } - private static void readTemplateProperties(String path, 
SystemVMTemplateDetails details) { + private static Pair readTemplatePropertiesSizes(String path) { File tmpFile = new File(path); Long size = null; Long physicalSize = 0L; @@ -574,8 +592,13 @@ public class SystemVmTemplateRegistration { } catch (IOException ex) { LOGGER.warn("Failed to read from template.properties", ex); } - details.setSize(size); - details.setPhysicalSize(physicalSize); + return new Pair<>(size, physicalSize); + } + + public static void readTemplateProperties(String path, SystemVMTemplateDetails details) { + Pair templateSizes = readTemplatePropertiesSizes(path); + details.setSize(templateSizes.first()); + details.setPhysicalSize(templateSizes.second()); } private void updateTemplateTablesOnFailure(long templateId) { @@ -799,7 +822,7 @@ public class SystemVmTemplateRegistration { TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByStoreTemplate(storeUrlAndId.second(), templateId); if (templateDataStoreVO != null) { String installPath = templateDataStoreVO.getInstallPath(); - if (validateIfSeeded(storeUrlAndId.first(), installPath, nfsVersion)) { + if (validateIfSeeded(templateDataStoreVO, storeUrlAndId.first(), installPath, nfsVersion)) { continue; } } @@ -870,7 +893,7 @@ public class SystemVmTemplateRegistration { public void doInTransactionWithoutResult(final TransactionStatus status) { Set hypervisorsListInUse = new HashSet(); try { - hypervisorsListInUse = clusterDao.getDistictAvailableHypervisorsAcrossClusters(); + hypervisorsListInUse = clusterDao.getDistinctAvailableHypervisorsAcrossClusters(); } catch (final Exception e) { LOGGER.error("updateSystemVmTemplates: Exception caught while getting hypervisor types from clusters: " + e.getMessage()); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java index 1c2c4b3c7ce..223d7a46637 100644 --- 
a/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java @@ -87,6 +87,36 @@ public class DatabaseAccessObject { return columnExists; } + public String getColumnType(Connection conn, String tableName, String columnName) { + try (PreparedStatement pstmt = conn.prepareStatement(String.format("DESCRIBE %s %s", tableName, columnName));){ + ResultSet rs = pstmt.executeQuery(); + if (rs.next()) { + return rs.getString("Type"); + } + } catch (SQLException e) { + logger.warn("Type for column {} can not be retrieved in {} ignoring exception: {}", columnName, tableName, e.getMessage()); + } + return null; + } + + public void addColumn(Connection conn, String tableName, String columnName, String columnDefinition) { + try (PreparedStatement pstmt = conn.prepareStatement(String.format("ALTER TABLE %s ADD COLUMN %s %s", tableName, columnName, columnDefinition));){ + pstmt.executeUpdate(); + logger.debug("Column {} is added successfully from the table {}", columnName, tableName); + } catch (SQLException e) { + logger.warn("Unable to add column {} to table {} due to exception", columnName, tableName, e); + } + } + + public void changeColumn(Connection conn, String tableName, String oldColumnName, String newColumnName, String columnDefinition) { + try (PreparedStatement pstmt = conn.prepareStatement(String.format("ALTER TABLE %s CHANGE COLUMN %s %s %s", tableName, oldColumnName, newColumnName, columnDefinition));){ + pstmt.executeUpdate(); + logger.debug("Column {} is changed successfully to {} from the table {}", oldColumnName, newColumnName, tableName); + } catch (SQLException e) { + logger.warn("Unable to add column {} to {} from the table {} due to exception", oldColumnName, newColumnName, tableName, e); + } + } + public String generateIndexName(String tableName, String... 
columnName) { return String.format("i_%s__%s", tableName, StringUtils.join(columnName, "__")); } @@ -114,6 +144,17 @@ public class DatabaseAccessObject { } } + public void renameIndex(Connection conn, String tableName, String oldName, String newName) { + String stmt = String.format("ALTER TABLE %s RENAME INDEX %s TO %s", tableName, oldName, newName); + logger.debug("Statement: {}", stmt); + try (PreparedStatement pstmt = conn.prepareStatement(stmt)) { + pstmt.execute(); + logger.debug("Renamed index {} to {}", oldName, newName); + } catch (SQLException e) { + logger.warn("Unable to rename index {} to {}", oldName, newName, e); + } + } + protected void closePreparedStatement(PreparedStatement pstmt, String errorMessage) { try { if (pstmt != null) { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeUtils.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeUtils.java index 51e6ac7b9a1..be073fcce77 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeUtils.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeUtils.java @@ -31,6 +31,12 @@ public class DbUpgradeUtils { } } + public static void renameIndexIfNeeded(Connection conn, String tableName, String oldName, String newName) { + if (!dao.indexExists(conn, tableName, oldName)) { + dao.renameIndex(conn, tableName, oldName, newName); + } + } + public static void addForeignKey(Connection conn, String tableName, String tableColumn, String foreignTableName, String foreignColumnName) { dao.addForeignKey(conn, tableName, tableColumn, foreignTableName, foreignColumnName); } @@ -52,4 +58,20 @@ public class DbUpgradeUtils { } } + public static String getTableColumnType(Connection conn, String tableName, String columnName) { + return dao.getColumnType(conn, tableName, columnName); + } + + public static void addTableColumnIfNotExist(Connection conn, String tableName, String columnName, String columnDefinition) { + if (!dao.columnExists(conn, tableName, 
columnName)) { + dao.addColumn(conn, tableName, columnName, columnDefinition); + } + } + + public static void changeTableColumnIfNotExist(Connection conn, String tableName, String oldColumnName, String newColumnName, String columnDefinition) { + if (dao.columnExists(conn, tableName, oldColumnName)) { + dao.changeColumn(conn, tableName, oldColumnName, newColumnName, columnDefinition); + } + } + } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42000to42010.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42000to42010.java index 197ca1cb34c..6298e0e729a 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42000to42010.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42000to42010.java @@ -53,6 +53,7 @@ public class Upgrade42000to42010 extends DbUpgradeAbstractImpl implements DbUpgr @Override public void performDataMigration(Connection conn) { + addIndexes(conn); } @Override @@ -80,4 +81,42 @@ public class Upgrade42000to42010 extends DbUpgradeAbstractImpl implements DbUpgr throw new CloudRuntimeException("Failed to find / register SystemVM template(s)"); } } + + private void addIndexes(Connection conn) { + DbUpgradeUtils.addIndexIfNeeded(conn, "host", "mgmt_server_id"); + DbUpgradeUtils.addIndexIfNeeded(conn, "host", "resource"); + DbUpgradeUtils.addIndexIfNeeded(conn, "host", "resource_state"); + DbUpgradeUtils.addIndexIfNeeded(conn, "host", "type"); + + DbUpgradeUtils.renameIndexIfNeeded(conn, "user_ip_address", "public_ip_address", "uk_public_ip_address"); + DbUpgradeUtils.addIndexIfNeeded(conn, "user_ip_address", "public_ip_address"); + DbUpgradeUtils.addIndexIfNeeded(conn, "user_ip_address", "data_center_id"); + DbUpgradeUtils.addIndexIfNeeded(conn, "user_ip_address", "vlan_db_id"); + DbUpgradeUtils.addIndexIfNeeded(conn, "user_ip_address", "removed"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "vlan", "vlan_type"); + DbUpgradeUtils.addIndexIfNeeded(conn, "vlan", "data_center_id"); + 
DbUpgradeUtils.addIndexIfNeeded(conn, "vlan", "removed"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "network_offering_details", "name"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "network_offering_details", "resource_id", "resource_type"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "service_offering", "cpu"); + DbUpgradeUtils.addIndexIfNeeded(conn, "service_offering", "speed"); + DbUpgradeUtils.addIndexIfNeeded(conn, "service_offering", "ram_size"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "op_host_planner_reservation", "resource_usage"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "storage_pool", "pool_type"); + DbUpgradeUtils.addIndexIfNeeded(conn, "storage_pool", "data_center_id", "status", "scope", "hypervisor"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "router_network_ref", "guest_type"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "domain_router", "role"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "async_job", "instance_type", "job_status"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "cluster", "managed_state"); + } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42010to42100.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42010to42100.java index 06a68ec3d8b..d6dc85dbb9a 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42010to42100.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42010to42100.java @@ -17,10 +17,16 @@ package com.cloud.upgrade.dao; import com.cloud.upgrade.SystemVmTemplateRegistration; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; import java.io.InputStream; import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.util.List; + +import org.apache.cloudstack.framework.config.ConfigKey; public class Upgrade42010to42100 extends DbUpgradeAbstractImpl implements DbUpgrade, DbUpgradeSystemVmTemplate { private SystemVmTemplateRegistration systemVmTemplateRegistration; @@ 
-53,6 +59,7 @@ public class Upgrade42010to42100 extends DbUpgradeAbstractImpl implements DbUpgr @Override public void performDataMigration(Connection conn) { + migrateConfigurationScopeToBitmask(conn); } @Override @@ -80,4 +87,35 @@ public class Upgrade42010to42100 extends DbUpgradeAbstractImpl implements DbUpgr throw new CloudRuntimeException("Failed to find / register SystemVM template(s)"); } } + + protected void migrateConfigurationScopeToBitmask(Connection conn) { + String scopeDataType = DbUpgradeUtils.getTableColumnType(conn, "configuration", "scope"); + logger.info("Data type of the column scope of table configuration is {}", scopeDataType); + if (!"varchar(255)".equals(scopeDataType)) { + return; + } + DbUpgradeUtils.addTableColumnIfNotExist(conn, "configuration", "new_scope", "BIGINT DEFAULT 0"); + migrateExistingConfigurationScopeValues(conn); + DbUpgradeUtils.dropTableColumnsIfExist(conn, "configuration", List.of("scope")); + DbUpgradeUtils.changeTableColumnIfNotExist(conn, "configuration", "new_scope", "scope", "BIGINT NOT NULL DEFAULT 0 COMMENT 'Bitmask for scope(s) of this parameter'"); + } + + protected void migrateExistingConfigurationScopeValues(Connection conn) { + StringBuilder sql = new StringBuilder("UPDATE configuration\n" + + "SET new_scope = " + + " CASE "); + for (ConfigKey.Scope scope : ConfigKey.Scope.values()) { + sql.append(" WHEN scope = '").append(scope.name()).append("' THEN ").append(scope.getBitValue()).append(" "); + } + sql.append(" ELSE 0 " + + " END " + + "WHERE scope IS NOT NULL;"); + TransactionLegacy txn = TransactionLegacy.currentTxn(); + try (PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql.toString())) { + pstmt.executeUpdate(); + } catch (SQLException e) { + logger.error("Failed to migrate existing configuration scope values to bitmask", e); + throw new CloudRuntimeException(String.format("Failed to migrate existing configuration scope values to bitmask due to: %s", e.getMessage())); + } + } } diff --git 
a/engine/schema/src/main/java/com/cloud/usage/dao/UsageNetworksDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageNetworksDaoImpl.java index f8ffbf74f85..99ba3587688 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageNetworksDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageNetworksDaoImpl.java @@ -16,7 +16,6 @@ // under the License. package com.cloud.usage.dao; -import com.cloud.network.Network; import com.cloud.usage.UsageNetworksVO; import com.cloud.utils.DateUtil; import com.cloud.utils.db.GenericDaoBase; @@ -70,11 +69,10 @@ public class UsageNetworksDaoImpl extends GenericDaoBase SearchCriteria sc = this.createSearchCriteria(); sc.addAnd("networkId", SearchCriteria.Op.EQ, networkId); sc.addAnd("removed", SearchCriteria.Op.NULL); - UsageNetworksVO vo = findOneBy(sc); - if (vo != null) { - vo.setRemoved(removed); - vo.setState(Network.State.Destroy.name()); - update(vo.getId(), vo); + List usageNetworksVOs = listBy(sc); + for (UsageNetworksVO entry : usageNetworksVOs) { + entry.setRemoved(removed); + update(entry.getId(), entry); } } catch (final Exception e) { txn.rollback(); diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageVpcDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageVpcDaoImpl.java index 45ae845e58f..70cdadd1629 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageVpcDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageVpcDaoImpl.java @@ -16,7 +16,6 @@ // under the License. 
package com.cloud.usage.dao; -import com.cloud.network.vpc.Vpc; import com.cloud.usage.UsageVpcVO; import com.cloud.utils.DateUtil; import com.cloud.utils.db.GenericDaoBase; @@ -64,11 +63,10 @@ public class UsageVpcDaoImpl extends GenericDaoBase implements SearchCriteria sc = this.createSearchCriteria(); sc.addAnd("vpcId", SearchCriteria.Op.EQ, vpcId); sc.addAnd("removed", SearchCriteria.Op.NULL); - UsageVpcVO vo = findOneBy(sc); - if (vo != null) { - vo.setRemoved(removed); - vo.setState(Vpc.State.Inactive.name()); - update(vo.getId(), vo); + List usageVpcVOs = listBy(sc); + for (UsageVpcVO entry : usageVpcVOs) { + entry.setRemoved(removed); + update(entry.getId(), entry); } } catch (final Exception e) { txn.rollback(); diff --git a/engine/schema/src/main/java/com/cloud/user/AccountDetailVO.java b/engine/schema/src/main/java/com/cloud/user/AccountDetailVO.java index 863f6c96008..aa6e49666dd 100644 --- a/engine/schema/src/main/java/com/cloud/user/AccountDetailVO.java +++ b/engine/schema/src/main/java/com/cloud/user/AccountDetailVO.java @@ -23,18 +23,18 @@ import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.Table; -import org.apache.cloudstack.api.InternalIdentity; +import org.apache.cloudstack.api.ResourceDetail; @Entity @Table(name = "account_details") -public class AccountDetailVO implements InternalIdentity { +public class AccountDetailVO implements ResourceDetail { @Id @GeneratedValue(strategy = GenerationType.IDENTITY) @Column(name = "id") private long id; @Column(name = "account_id") - private long accountId; + private long resourceId; @Column(name = "name") private String name; @@ -46,13 +46,14 @@ public class AccountDetailVO implements InternalIdentity { } public AccountDetailVO(long accountId, String name, String value) { - this.accountId = accountId; + this.resourceId = accountId; this.name = name; this.value = value; } - public long getAccountId() { - return accountId; + @Override + public long getResourceId() { 
+ return resourceId; } public String getName() { @@ -63,6 +64,11 @@ public class AccountDetailVO implements InternalIdentity { return value; } + @Override + public boolean isDisplay() { + return true; + } + public void setValue(String value) { this.value = value; } diff --git a/engine/schema/src/main/java/com/cloud/user/AccountDetailsDao.java b/engine/schema/src/main/java/com/cloud/user/AccountDetailsDao.java index 514433e8068..65bbe1670a8 100644 --- a/engine/schema/src/main/java/com/cloud/user/AccountDetailsDao.java +++ b/engine/schema/src/main/java/com/cloud/user/AccountDetailsDao.java @@ -19,8 +19,9 @@ package com.cloud.user; import java.util.Map; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDao; -public interface AccountDetailsDao extends GenericDao { +public interface AccountDetailsDao extends GenericDao, ResourceDetailsDao { Map findDetails(long accountId); void persist(long accountId, Map details); @@ -34,6 +35,4 @@ public interface AccountDetailsDao extends GenericDao { * they will get created */ void update(long accountId, Map details); - - String getActualValue(AccountDetailVO accountDetailVO); } diff --git a/engine/schema/src/main/java/com/cloud/user/AccountDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/user/AccountDetailsDaoImpl.java index 510270ad7bf..cbacf9af572 100644 --- a/engine/schema/src/main/java/com/cloud/user/AccountDetailsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/user/AccountDetailsDaoImpl.java @@ -27,22 +27,21 @@ import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.ConfigKey.Scope; import org.apache.cloudstack.framework.config.ScopedConfigStorage; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.framework.config.impl.ConfigurationVO; import com.cloud.domain.DomainDetailVO; import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; import 
com.cloud.domain.dao.DomainDetailsDao; import com.cloud.user.dao.AccountDao; -import com.cloud.utils.crypt.DBEncryptionUtil; -import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.Pair; import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.TransactionLegacy; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; -public class AccountDetailsDaoImpl extends GenericDaoBase implements AccountDetailsDao, ScopedConfigStorage { +public class AccountDetailsDaoImpl extends ResourceDetailsDaoBase implements AccountDetailsDao, ScopedConfigStorage { protected final SearchBuilder accountSearch; @Inject @@ -56,16 +55,16 @@ public class AccountDetailsDaoImpl extends GenericDaoBase protected AccountDetailsDaoImpl() { accountSearch = createSearchBuilder(); - accountSearch.and("accountId", accountSearch.entity().getAccountId(), Op.EQ); + accountSearch.and("accountId", accountSearch.entity().getResourceId(), Op.EQ); accountSearch.done(); } @Override public Map findDetails(long accountId) { QueryBuilder sc = QueryBuilder.create(AccountDetailVO.class); - sc.and(sc.entity().getAccountId(), Op.EQ, accountId); + sc.and(sc.entity().getResourceId(), Op.EQ, accountId); List results = sc.list(); - Map details = new HashMap(results.size()); + Map details = new HashMap<>(results.size()); for (AccountDetailVO r : results) { details.put(r.getName(), r.getValue()); } @@ -89,11 +88,16 @@ public class AccountDetailsDaoImpl extends GenericDaoBase @Override public AccountDetailVO findDetail(long accountId, String name) { QueryBuilder sc = QueryBuilder.create(AccountDetailVO.class); - sc.and(sc.entity().getAccountId(), Op.EQ, accountId); + sc.and(sc.entity().getResourceId(), Op.EQ, accountId); sc.and(sc.entity().getName(), Op.EQ, name); return sc.find(); } + @Override + public void addDetail(long resourceId, String key, String value, 
boolean display) { + super.addDetail(new AccountDetailVO(resourceId, key, value)); + } + @Override public void deleteDetails(long accountId) { SearchCriteria sc = accountSearch.create(); @@ -155,11 +159,11 @@ public class AccountDetailsDaoImpl extends GenericDaoBase } @Override - public String getActualValue(AccountDetailVO accountDetailVO) { - ConfigurationVO configurationVO = _configDao.findByName(accountDetailVO.getName()); - if (configurationVO != null && configurationVO.isEncrypted()) { - return DBEncryptionUtil.decrypt(accountDetailVO.getValue()); + public Pair getParentScope(long id) { + Account account = _accountDao.findById(id); + if (account == null) { + return null; } - return accountDetailVO.getValue(); + return new Pair<>(getScope().getParent(), account.getDomainId()); } } diff --git a/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java b/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java index d204f67dc93..e4fcbad6b02 100644 --- a/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java +++ b/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java @@ -33,11 +33,11 @@ import javax.persistence.Table; import javax.persistence.Transient; import org.apache.cloudstack.api.InternalIdentity; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; +import org.apache.commons.lang3.StringUtils; import com.cloud.utils.db.Encrypt; import com.cloud.utils.db.GenericDao; -import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; -import org.apache.commons.lang3.StringUtils; @Entity @Table(name = "user") @@ -131,12 +131,6 @@ public class UserAccountVO implements UserAccount, InternalIdentity { public UserAccountVO() { } - @Override - public String toString() { - return String.format("UserAccount %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields - (this, "id", "uuid", "username", "accountName")); - } - @Override public long getId() { return 
id; @@ -379,4 +373,10 @@ public class UserAccountVO implements UserAccount, InternalIdentity { public void setDetails(Map details) { this.details = details; } + + @Override + public String toString() { + return String.format("UserAccount %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields + (this, "id", "uuid", "username", "accountName")); + } } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDao.java b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDao.java index cb19748fda4..af32163b4c7 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDao.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDao.java @@ -45,7 +45,7 @@ public interface ConsoleProxyDao extends GenericDao { public List getDatacenterSessionLoadMatrix(); - public List> getDatacenterStoragePoolHostInfo(long dcId, boolean countAllPoolTypes); + public boolean hasDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly); public List> getProxyLoadMatrix(); diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDaoImpl.java index ef94a4d9f72..bc79194a10f 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDaoImpl.java @@ -23,7 +23,6 @@ import java.util.ArrayList; import java.util.Date; import java.util.List; - import org.springframework.stereotype.Component; import com.cloud.info.ConsoleProxyLoadInfo; @@ -76,11 +75,11 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase im private static final String GET_PROXY_ACTIVE_LOAD = "SELECT active_session AS count" + " FROM console_proxy" + " WHERE id=?"; - private static final String STORAGE_POOL_HOST_INFO = "SELECT p.data_center_id, count(ph.host_id) " + " FROM storage_pool p, storage_pool_host_ref ph " - + " WHERE p.id = ph.pool_id AND p.data_center_id = ? 
" + " GROUP by p.data_center_id"; + protected static final String STORAGE_POOL_HOST_INFO = "SELECT (SELECT id FROM storage_pool_host_ref ph WHERE " + + "ph.pool_id=p.id limit 1) AS sphr FROM storage_pool p WHERE p.data_center_id = ?"; - private static final String SHARED_STORAGE_POOL_HOST_INFO = "SELECT p.data_center_id, count(ph.host_id) " + " FROM storage_pool p, storage_pool_host_ref ph " - + " WHERE p.pool_type <> 'LVM' AND p.id = ph.pool_id AND p.data_center_id = ? " + " GROUP by p.data_center_id"; + protected static final String SHARED_STORAGE_POOL_HOST_INFO = "SELECT (SELECT id FROM storage_pool_host_ref ph " + + "WHERE ph.pool_id=p.id limit 1) AS sphr FROM storage_pool p WHERE p.data_center_id = ? AND p.pool_type NOT IN ('LVM', 'Filesystem')"; protected SearchBuilder DataCenterStatusSearch; protected SearchBuilder StateSearch; @@ -219,28 +218,23 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase im } @Override - public List> getDatacenterStoragePoolHostInfo(long dcId, boolean countAllPoolTypes) { - ArrayList> l = new ArrayList>(); - + public boolean hasDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly) { + Long poolCount = 0L; + String sql = sharedOnly ? 
SHARED_STORAGE_POOL_HOST_INFO : STORAGE_POOL_HOST_INFO; TransactionLegacy txn = TransactionLegacy.currentTxn(); - ; - PreparedStatement pstmt = null; - try { - if (countAllPoolTypes) { - pstmt = txn.prepareAutoCloseStatement(STORAGE_POOL_HOST_INFO); - } else { - pstmt = txn.prepareAutoCloseStatement(SHARED_STORAGE_POOL_HOST_INFO); - } + try (PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql)) { pstmt.setLong(1, dcId); - ResultSet rs = pstmt.executeQuery(); while (rs.next()) { - l.add(new Pair(rs.getLong(1), rs.getInt(2))); + poolCount = rs.getLong(1); + if (poolCount > 0) { + return true; + } } } catch (SQLException e) { logger.debug("Caught SQLException: ", e); } - return l; + return false; } @Override diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/NicIpAliasDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/NicIpAliasDaoImpl.java index 887b3d73087..44866c0a358 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/NicIpAliasDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/NicIpAliasDaoImpl.java @@ -170,8 +170,7 @@ public class NicIpAliasDaoImpl extends GenericDaoBase implem public Integer countAliasIps(long id) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("instanceId", id); - List list = listBy(sc); - return list.size(); + return getCount(sc); } @Override diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java index 52bc5aac7e2..823642d8c3d 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java @@ -16,6 +16,7 @@ // under the License. package com.cloud.vm.dao; +import java.util.Collection; import java.util.Date; import java.util.HashMap; import java.util.List; @@ -81,7 +82,7 @@ public interface VMInstanceDao extends GenericDao, StateDao< List listByHostAndState(long hostId, State... 
states); - List listByTypes(VirtualMachine.Type... types); + int countByTypes(VirtualMachine.Type... types); VMInstanceVO findByIdTypes(long id, VirtualMachine.Type... types); @@ -144,21 +145,28 @@ public interface VMInstanceDao extends GenericDao, StateDao< */ List listDistinctHostNames(long networkId, VirtualMachine.Type... types); + List findByHostInStatesExcluding(Long hostId, Collection excludingIds, State... states); + List findByHostInStates(Long hostId, State... states); List listStartingWithNoHostId(); boolean updatePowerState(long instanceId, long powerHostId, VirtualMachine.PowerState powerState, Date wisdomEra); + Map updatePowerState(Map instancePowerStates, + long powerHostId, Date wisdomEra); + void resetVmPowerStateTracking(long instanceId); + void resetVmPowerStateTracking(List instanceId); + void resetHostPowerStateTracking(long hostId); HashMap countVgpuVMs(Long dcId, Long podId, Long clusterId); VMInstanceVO findVMByHostNameInZone(String hostName, long zoneId); - boolean isPowerStateUpToDate(long instanceId); + boolean isPowerStateUpToDate(VMInstanceVO instance); List listNonMigratingVmsByHostEqualsLastHost(long hostId); @@ -170,4 +178,13 @@ public interface VMInstanceDao extends GenericDao, StateDao< List skippedVmIds); Pair, Integer> listByVmsNotInClusterUsingPool(long clusterId, long poolId); + + List listIdServiceOfferingForUpVmsByHostId(Long hostId); + + List listIdServiceOfferingForVmsMigratingFromHost(Long hostId); + + Map getNameIdMapForVmInstanceNames(Collection names); + + Map getNameIdMapForVmIds(Collection ids); + } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java index 0e87e6bcb7d..ef10af63bae 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -20,6 +20,7 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import 
java.sql.SQLException; import java.util.ArrayList; +import java.util.Collection; import java.util.Date; import java.util.HashMap; import java.util.List; @@ -75,6 +76,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem protected SearchBuilder LHVMClusterSearch; protected SearchBuilder IdStatesSearch; protected SearchBuilder AllFieldsSearch; + protected SearchBuilder IdServiceOfferingIdSelectSearch; protected SearchBuilder ZoneTemplateNonExpungedSearch; protected SearchBuilder TemplateNonExpungedSearch; protected SearchBuilder NameLikeSearch; @@ -101,6 +103,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem protected SearchBuilder BackupSearch; protected SearchBuilder LastHostAndStatesSearch; protected SearchBuilder VmsNotInClusterUsingPool; + protected SearchBuilder IdsPowerStateSelectSearch; @Inject ResourceTagDao tagsDao; @@ -175,6 +178,14 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem AllFieldsSearch.and("account", AllFieldsSearch.entity().getAccountId(), Op.EQ); AllFieldsSearch.done(); + IdServiceOfferingIdSelectSearch = createSearchBuilder(); + IdServiceOfferingIdSelectSearch.and("host", IdServiceOfferingIdSelectSearch.entity().getHostId(), Op.EQ); + IdServiceOfferingIdSelectSearch.and("lastHost", IdServiceOfferingIdSelectSearch.entity().getLastHostId(), Op.EQ); + IdServiceOfferingIdSelectSearch.and("state", IdServiceOfferingIdSelectSearch.entity().getState(), Op.EQ); + IdServiceOfferingIdSelectSearch.and("states", IdServiceOfferingIdSelectSearch.entity().getState(), Op.IN); + IdServiceOfferingIdSelectSearch.selectFields(IdServiceOfferingIdSelectSearch.entity().getId(), IdServiceOfferingIdSelectSearch.entity().getServiceOfferingId()); + IdServiceOfferingIdSelectSearch.done(); + ZoneTemplateNonExpungedSearch = createSearchBuilder(); ZoneTemplateNonExpungedSearch.and("zone", ZoneTemplateNonExpungedSearch.entity().getDataCenterId(), Op.EQ); ZoneTemplateNonExpungedSearch.and("template", 
ZoneTemplateNonExpungedSearch.entity().getTemplateId(), Op.EQ); @@ -274,6 +285,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem HostAndStateSearch = createSearchBuilder(); HostAndStateSearch.and("host", HostAndStateSearch.entity().getHostId(), Op.EQ); HostAndStateSearch.and("states", HostAndStateSearch.entity().getState(), Op.IN); + HostAndStateSearch.and("idsNotIn", HostAndStateSearch.entity().getId(), Op.NIN); HostAndStateSearch.done(); StartingWithNoHostSearch = createSearchBuilder(); @@ -323,6 +335,15 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem VmsNotInClusterUsingPool.join("hostSearch2", hostSearch2, hostSearch2.entity().getId(), VmsNotInClusterUsingPool.entity().getHostId(), JoinType.INNER); VmsNotInClusterUsingPool.and("vmStates", VmsNotInClusterUsingPool.entity().getState(), Op.IN); VmsNotInClusterUsingPool.done(); + + IdsPowerStateSelectSearch = createSearchBuilder(); + IdsPowerStateSelectSearch.and("id", IdsPowerStateSelectSearch.entity().getId(), Op.IN); + IdsPowerStateSelectSearch.selectFields(IdsPowerStateSelectSearch.entity().getId(), + IdsPowerStateSelectSearch.entity().getPowerHostId(), + IdsPowerStateSelectSearch.entity().getPowerState(), + IdsPowerStateSelectSearch.entity().getPowerStateUpdateCount(), + IdsPowerStateSelectSearch.entity().getPowerStateUpdateTime()); + IdsPowerStateSelectSearch.done(); } @Override @@ -458,10 +479,10 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem } @Override - public List listByTypes(Type... types) { + public int countByTypes(Type... types) { SearchCriteria sc = TypesSearch.create(); sc.setParameters("types", (Object[])types); - return listBy(sc); + return getCount(sc); } @Override @@ -897,6 +918,17 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem return result; } + @Override + public List findByHostInStatesExcluding(Long hostId, Collection excludingIds, State... 
states) { + SearchCriteria sc = HostAndStateSearch.create(); + sc.setParameters("host", hostId); + if (excludingIds != null && !excludingIds.isEmpty()) { + sc.setParameters("idsNotIn", excludingIds.toArray()); + } + sc.setParameters("states", (Object[])states); + return listBy(sc); + } + @Override public List findByHostInStates(Long hostId, State... states) { SearchCriteria sc = HostAndStateSearch.create(); @@ -912,42 +944,109 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem return listBy(sc); } - @Override - public boolean updatePowerState(final long instanceId, final long powerHostId, final VirtualMachine.PowerState powerState, Date wisdomEra) { - return Transaction.execute(new TransactionCallback<>() { - @Override - public Boolean doInTransaction(TransactionStatus status) { - boolean needToUpdate = false; - VMInstanceVO instance = findById(instanceId); - if (instance != null - && (null == instance.getPowerStateUpdateTime() - || instance.getPowerStateUpdateTime().before(wisdomEra))) { - Long savedPowerHostId = instance.getPowerHostId(); - if (instance.getPowerState() != powerState - || savedPowerHostId == null - || savedPowerHostId != powerHostId - || !isPowerStateInSyncWithInstanceState(powerState, powerHostId, instance)) { - instance.setPowerState(powerState); - instance.setPowerHostId(powerHostId); - instance.setPowerStateUpdateCount(1); - instance.setPowerStateUpdateTime(DateUtil.currentGMTTime()); - needToUpdate = true; - update(instanceId, instance); - } else { - // to reduce DB updates, consecutive same state update for more than 3 times - if (instance.getPowerStateUpdateCount() < MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT) { - instance.setPowerStateUpdateCount(instance.getPowerStateUpdateCount() + 1); - instance.setPowerStateUpdateTime(DateUtil.currentGMTTime()); - needToUpdate = true; - update(instanceId, instance); - } - } - } - return needToUpdate; + protected List listSelectPowerStateByIds(final List ids) { + if 
(CollectionUtils.isEmpty(ids)) { + return new ArrayList<>(); + } + SearchCriteria sc = IdsPowerStateSelectSearch.create(); + sc.setParameters("id", ids.toArray()); + return customSearch(sc, null); + } + + protected Integer getPowerUpdateCount(final VMInstanceVO instance, final long powerHostId, + final VirtualMachine.PowerState powerState, Date wisdomEra) { + if (instance.getPowerStateUpdateTime() == null || instance.getPowerStateUpdateTime().before(wisdomEra)) { + Long savedPowerHostId = instance.getPowerHostId(); + boolean isStateMismatch = instance.getPowerState() != powerState + || savedPowerHostId == null + || !savedPowerHostId.equals(powerHostId) + || !isPowerStateInSyncWithInstanceState(powerState, powerHostId, instance); + if (isStateMismatch) { + return 1; + } else if (instance.getPowerStateUpdateCount() < MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT) { + return instance.getPowerStateUpdateCount() + 1; } + } + return null; + } + + @Override + public boolean updatePowerState(final long instanceId, final long powerHostId, + final VirtualMachine.PowerState powerState, Date wisdomEra) { + return Transaction.execute((TransactionCallback) status -> { + VMInstanceVO instance = findById(instanceId); + if (instance == null) { + return false; + } + // Check if we need to update based on powerStateUpdateTime + if (instance.getPowerStateUpdateTime() == null || instance.getPowerStateUpdateTime().before(wisdomEra)) { + Long savedPowerHostId = instance.getPowerHostId(); + boolean isStateMismatch = instance.getPowerState() != powerState + || savedPowerHostId == null + || !savedPowerHostId.equals(powerHostId) + || !isPowerStateInSyncWithInstanceState(powerState, powerHostId, instance); + + if (isStateMismatch) { + instance.setPowerState(powerState); + instance.setPowerHostId(powerHostId); + instance.setPowerStateUpdateCount(1); + } else if (instance.getPowerStateUpdateCount() < MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT) { + 
instance.setPowerStateUpdateCount(instance.getPowerStateUpdateCount() + 1); + } else { + // No need to update if power state is already in sync and count exceeded + return false; + } + instance.setPowerStateUpdateTime(DateUtil.currentGMTTime()); + update(instanceId, instance); + return true; // Return true since an update occurred + } + return false; }); } + @Override + public Map updatePowerState( + final Map instancePowerStates, long powerHostId, Date wisdomEra) { + Map notUpdated = new HashMap<>(); + List instances = listSelectPowerStateByIds(new ArrayList<>(instancePowerStates.keySet())); + Map updateCounts = new HashMap<>(); + for (VMInstanceVO instance : instances) { + VirtualMachine.PowerState powerState = instancePowerStates.get(instance.getId()); + Integer count = getPowerUpdateCount(instance, powerHostId, powerState, wisdomEra); + if (count != null) { + updateCounts.put(instance.getId(), count); + } else { + notUpdated.put(instance.getId(), powerState); + } + } + if (updateCounts.isEmpty()) { + return notUpdated; + } + StringBuilder sql = new StringBuilder("UPDATE `cloud`.`vm_instance` SET " + + "`power_host` = ?, `power_state_update_time` = now(), `power_state` = CASE "); + updateCounts.keySet().forEach(key -> { + sql.append("WHEN id = ").append(key).append(" THEN '").append(instancePowerStates.get(key)).append("' "); + }); + sql.append("END, `power_state_update_count` = CASE "); + StringBuilder idList = new StringBuilder(); + updateCounts.forEach((key, value) -> { + sql.append("WHEN `id` = ").append(key).append(" THEN ").append(value).append(" "); + idList.append(key).append(","); + }); + idList.setLength(idList.length() - 1); + sql.append("END WHERE `id` IN (").append(idList).append(")"); + TransactionLegacy txn = TransactionLegacy.currentTxn(); + try (PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql.toString())) { + pstmt.setLong(1, powerHostId); + pstmt.executeUpdate(); + } catch (SQLException e) { + logger.error("Unable to execute update 
power states SQL from VMs {} due to: {}", + idList, e.getMessage(), e); + return instancePowerStates; + } + return notUpdated; + } + private boolean isPowerStateInSyncWithInstanceState(final VirtualMachine.PowerState powerState, final long powerHostId, final VMInstanceVO instance) { State instanceState = instance.getState(); if ((powerState == VirtualMachine.PowerState.PowerOff && instanceState == State.Running) @@ -962,11 +1061,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem } @Override - public boolean isPowerStateUpToDate(final long instanceId) { - VMInstanceVO instance = findById(instanceId); - if(instance == null) { - throw new CloudRuntimeException("checking power state update count on non existing instance " + instanceId); - } + public boolean isPowerStateUpToDate(final VMInstanceVO instance) { return instance.getPowerStateUpdateCount() < MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT; } @@ -985,6 +1080,25 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem }); } + @Override + public void resetVmPowerStateTracking(List instanceIds) { + if (CollectionUtils.isEmpty(instanceIds)) { + return; + } + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + SearchCriteria sc = IdsPowerStateSelectSearch.create(); + sc.setParameters("id", instanceIds.toArray()); + VMInstanceVO vm = createForUpdate(); + vm.setPowerStateUpdateCount(0); + vm.setPowerStateUpdateTime(DateUtil.currentGMTTime()); + UpdateBuilder ub = getUpdateBuilder(vm); + update(ub, sc, null); + } + }); + } + @Override @DB public void resetHostPowerStateTracking(final long hostId) { Transaction.execute(new TransactionCallbackNoReturn() { @@ -1060,6 +1174,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem return searchIncludingRemoved(sc, filter, null, false); } + @Override public Pair, Integer> listByVmsNotInClusterUsingPool(long clusterId, long poolId) { SearchCriteria sc = 
VmsNotInClusterUsingPool.create(); sc.setParameters("vmStates", State.Starting, State.Running, State.Stopping, State.Migrating, State.Restoring); @@ -1069,4 +1184,44 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem List uniqueVms = vms.stream().distinct().collect(Collectors.toList()); return new Pair<>(uniqueVms, uniqueVms.size()); } + + @Override + public List listIdServiceOfferingForUpVmsByHostId(Long hostId) { + SearchCriteria sc = IdServiceOfferingIdSelectSearch.create(); + sc.setParameters("host", hostId); + sc.setParameters("states", new Object[] {State.Starting, State.Running, State.Stopping, State.Migrating}); + return customSearch(sc, null); + } + + @Override + public List listIdServiceOfferingForVmsMigratingFromHost(Long hostId) { + SearchCriteria sc = IdServiceOfferingIdSelectSearch.create(); + sc.setParameters("lastHost", hostId); + sc.setParameters("state", State.Migrating); + return customSearch(sc, null); + } + + @Override + public Map getNameIdMapForVmInstanceNames(Collection names) { + SearchBuilder sb = createSearchBuilder(); + sb.and("name", sb.entity().getInstanceName(), Op.IN); + sb.selectFields(sb.entity().getId(), sb.entity().getInstanceName()); + SearchCriteria sc = sb.create(); + sc.setParameters("name", names.toArray()); + List vms = customSearch(sc, null); + return vms.stream() + .collect(Collectors.toMap(VMInstanceVO::getInstanceName, VMInstanceVO::getId)); + } + + @Override + public Map getNameIdMapForVmIds(Collection ids) { + SearchBuilder sb = createSearchBuilder(); + sb.and("id", sb.entity().getId(), Op.IN); + sb.selectFields(sb.entity().getId(), sb.entity().getInstanceName()); + SearchCriteria sc = sb.create(); + sc.setParameters("id", ids.toArray()); + List vms = customSearch(sc, null); + return vms.stream() + .collect(Collectors.toMap(VMInstanceVO::getInstanceName, VMInstanceVO::getId)); + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java 
b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java index fd3c0be18d2..0258c42c52b 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java @@ -58,15 +58,19 @@ public class BackupScheduleVO implements BackupSchedule { @Column(name = "async_job_id") Long asyncJobId; + @Column(name = "max_backups") + Integer maxBackups = 0; + public BackupScheduleVO() { } - public BackupScheduleVO(Long vmId, DateUtil.IntervalType scheduleType, String schedule, String timezone, Date scheduledTimestamp) { + public BackupScheduleVO(Long vmId, DateUtil.IntervalType scheduleType, String schedule, String timezone, Date scheduledTimestamp, Integer maxBackups) { this.vmId = vmId; this.scheduleType = (short) scheduleType.ordinal(); this.schedule = schedule; this.timezone = timezone; this.scheduledTimestamp = scheduledTimestamp; + this.maxBackups = maxBackups; } @Override @@ -128,4 +132,12 @@ public class BackupScheduleVO implements BackupSchedule { public void setAsyncJobId(Long asyncJobId) { this.asyncJobId = asyncJobId; } + + public Integer getMaxBackups() { + return maxBackups; + } + + public void setMaxBackups(Integer maxBackups) { + this.maxBackups = maxBackups; + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java index b4cd2f7bada..9ef442baff9 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java @@ -88,6 +88,9 @@ public class BackupVO implements Backup { @Column(name = "zone_id") private long zoneId; + @Column(name = "backup_interval_type") + private short backupIntervalType; + @Column(name = "backed_volumes", length = 65535) protected String backedUpVolumes; @@ -208,6 +211,14 @@ public class BackupVO implements Backup { 
this.zoneId = zoneId; } + public short getBackupIntervalType() { + return backupIntervalType; + } + + public void setBackupIntervalType(short backupIntervalType) { + this.backupIntervalType = backupIntervalType; + } + @Override public Class getEntityType() { return Backup.class; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDao.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDao.java index 89a13245b0a..ffd5e5a4a66 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDao.java @@ -35,5 +35,10 @@ public interface BackupDao extends GenericDao { List syncBackups(Long zoneId, Long vmId, List externalBackups); BackupVO getBackupVO(Backup backup); List listByOfferingId(Long backupOfferingId); + + List listBackupsByVMandIntervalType(Long vmId, Backup.Type backupType); + BackupResponse newBackupResponse(Backup backup); + public Long countBackupsForAccount(long accountId); + public Long calculateBackupStorageForAccount(long accountId); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDaoImpl.java index 5a9cd062037..b4e1a760282 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDaoImpl.java @@ -24,6 +24,7 @@ import java.util.Objects; import javax.annotation.PostConstruct; import javax.inject.Inject; +import com.cloud.utils.db.GenericSearchBuilder; import org.apache.cloudstack.api.response.BackupResponse; import org.apache.cloudstack.backup.Backup; import org.apache.cloudstack.backup.BackupOffering; @@ -60,6 +61,9 @@ public class BackupDaoImpl extends GenericDaoBase implements Bac BackupOfferingDao backupOfferingDao; private SearchBuilder backupSearch; + private GenericSearchBuilder 
CountBackupsByAccount; + private GenericSearchBuilder CalculateBackupStorageByAccount; + private SearchBuilder ListBackupsByVMandIntervalType; public BackupDaoImpl() { } @@ -72,6 +76,27 @@ public class BackupDaoImpl extends GenericDaoBase implements Bac backupSearch.and("backup_offering_id", backupSearch.entity().getBackupOfferingId(), SearchCriteria.Op.EQ); backupSearch.and("zone_id", backupSearch.entity().getZoneId(), SearchCriteria.Op.EQ); backupSearch.done(); + + CountBackupsByAccount = createSearchBuilder(Long.class); + CountBackupsByAccount.select(null, SearchCriteria.Func.COUNT, null); + CountBackupsByAccount.and("account", CountBackupsByAccount.entity().getAccountId(), SearchCriteria.Op.EQ); + CountBackupsByAccount.and("status", CountBackupsByAccount.entity().getStatus(), SearchCriteria.Op.NIN); + CountBackupsByAccount.and("removed", CountBackupsByAccount.entity().getRemoved(), SearchCriteria.Op.NULL); + CountBackupsByAccount.done(); + + CalculateBackupStorageByAccount = createSearchBuilder(SumCount.class); + CalculateBackupStorageByAccount.select("sum", SearchCriteria.Func.SUM, CalculateBackupStorageByAccount.entity().getSize()); + CalculateBackupStorageByAccount.and("account", CalculateBackupStorageByAccount.entity().getAccountId(), SearchCriteria.Op.EQ); + CalculateBackupStorageByAccount.and("status", CalculateBackupStorageByAccount.entity().getStatus(), SearchCriteria.Op.NIN); + CalculateBackupStorageByAccount.and("removed", CalculateBackupStorageByAccount.entity().getRemoved(), SearchCriteria.Op.NULL); + CalculateBackupStorageByAccount.done(); + + ListBackupsByVMandIntervalType = createSearchBuilder(); + ListBackupsByVMandIntervalType.and("vmId", ListBackupsByVMandIntervalType.entity().getVmId(), SearchCriteria.Op.EQ); + ListBackupsByVMandIntervalType.and("intervalType", ListBackupsByVMandIntervalType.entity().getBackupIntervalType(), SearchCriteria.Op.EQ); + ListBackupsByVMandIntervalType.and("status", 
ListBackupsByVMandIntervalType.entity().getStatus(), SearchCriteria.Op.EQ); + ListBackupsByVMandIntervalType.and("removed", ListBackupsByVMandIntervalType.entity().getRemoved(), SearchCriteria.Op.NULL); + ListBackupsByVMandIntervalType.done(); } @Override @@ -142,6 +167,31 @@ public class BackupDaoImpl extends GenericDaoBase implements Bac return listByVmId(zoneId, vmId); } + @Override + public Long countBackupsForAccount(long accountId) { + SearchCriteria sc = CountBackupsByAccount.create(); + sc.setParameters("account", accountId); + sc.setParameters("status", Backup.Status.Error, Backup.Status.Failed, Backup.Status.Removed, Backup.Status.Expunged); + return customSearch(sc, null).get(0); + } + + @Override + public Long calculateBackupStorageForAccount(long accountId) { + SearchCriteria sc = CalculateBackupStorageByAccount.create(); + sc.setParameters("account", accountId); + sc.setParameters("status", Backup.Status.Error, Backup.Status.Failed, Backup.Status.Removed, Backup.Status.Expunged); + return customSearch(sc, null).get(0).sum; + } + + @Override + public List listBackupsByVMandIntervalType(Long vmId, Backup.Type backupType) { + SearchCriteria sc = ListBackupsByVMandIntervalType.create(); + sc.setParameters("vmId", vmId); + sc.setParameters("intervalType", backupType.ordinal()); + sc.setParameters("status", Backup.Status.BackedUp); + return listBy(sc, null); + } + @Override public BackupResponse newBackupResponse(Backup backup) { VMInstanceVO vm = vmInstanceDao.findByIdIncludingRemoved(backup.getVmId()); diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupScheduleDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupScheduleDaoImpl.java index e00ccc5abd7..aac2e3bf232 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupScheduleDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupScheduleDaoImpl.java @@ -97,6 +97,7 @@ public class BackupScheduleDaoImpl
extends GenericDaoBase extends GenericDao * Removes all details for the resource specified * @param resourceId */ - public void removeDetails(long resourceId); + void removeDetails(long resourceId); /** @@ -76,7 +76,7 @@ public interface ResourceDetailsDao extends GenericDao * @param resourceId * @return list of details each implementing ResourceDetail interface */ - public List listDetails(long resourceId); + List listDetails(long resourceId); /** * List details for resourceId having display field = forDisplay value passed in @@ -84,19 +84,23 @@ public interface ResourceDetailsDao extends GenericDao * @param forDisplay * @return */ - public List listDetails(long resourceId, boolean forDisplay); + List listDetails(long resourceId, boolean forDisplay); - public Map listDetailsKeyPairs(long resourceId); + Map listDetailsKeyPairs(long resourceId); - public Map listDetailsKeyPairs(long resourceId, boolean forDisplay); + Map listDetailsKeyPairs(long resourceId, List keys); + + Map listDetailsKeyPairs(long resourceId, boolean forDisplay); Map listDetailsVisibility(long resourceId); - public void saveDetails(List details); + void saveDetails(List details); - public void addDetail(long resourceId, String key, String value, boolean display); + void addDetail(long resourceId, String key, String value, boolean display); - public List findResourceIdsByNameAndValueIn(String name, Object[] values); + List findResourceIdsByNameAndValueIn(String name, Object[] values); - public long batchExpungeForResources(List ids, Long batchSize); + long batchExpungeForResources(List ids, Long batchSize); + + String getActualValue(ResourceDetail resourceDetail); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java index 4205a7823e4..29d3f88fd90 100644 --- 
a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java @@ -19,10 +19,11 @@ package org.apache.cloudstack.resourcedetail; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; -import org.apache.cloudstack.api.ResourceDetail; import org.apache.commons.collections.CollectionUtils; +import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; @@ -30,7 +31,17 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.TransactionLegacy; +import org.apache.cloudstack.api.ResourceDetail; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.config.impl.ConfigurationVO; + +import javax.inject.Inject; + public abstract class ResourceDetailsDaoBase extends GenericDaoBase implements ResourceDetailsDao { + + @Inject + private ConfigurationDao configDao; + private SearchBuilder AllFieldsSearch; public ResourceDetailsDaoBase() { @@ -75,8 +86,7 @@ public abstract class ResourceDetailsDaoBase extends G sc.setParameters("value", value); } - List results = search(sc, null); - return results; + return search(sc, null); } public Map listDetailsKeyPairs(long resourceId) { @@ -84,13 +94,27 @@ public abstract class ResourceDetailsDaoBase extends G sc.setParameters("resourceId", resourceId); List results = search(sc, null); - Map details = new HashMap(results.size()); + Map details = new HashMap<>(results.size()); for (R result : results) { details.put(result.getName(), result.getValue()); } return details; } + @Override + public Map listDetailsKeyPairs(long resourceId, List keys) { + SearchBuilder sb = createSearchBuilder(); + sb.and("resourceId", 
sb.entity().getResourceId(), SearchCriteria.Op.EQ); + sb.and("name", sb.entity().getName(), SearchCriteria.Op.IN); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("resourceId", resourceId); + sc.setParameters("name", keys.toArray()); + + List results = search(sc, null); + return results.stream().collect(Collectors.toMap(R::getName, R::getValue)); + } + public Map listDetailsVisibility(long resourceId) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("resourceId", resourceId); @@ -107,8 +131,7 @@ public abstract class ResourceDetailsDaoBase extends G SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("resourceId", resourceId); - List results = search(sc, null); - return results; + return search(sc, null); } public void removeDetails(long resourceId) { @@ -170,7 +193,7 @@ public abstract class ResourceDetailsDaoBase extends G sc.setParameters("display", forDisplay); List results = search(sc, null); - Map details = new HashMap(results.size()); + Map details = new HashMap<>(results.size()); for (R result : results) { details.put(result.getName(), result.getValue()); } @@ -182,8 +205,7 @@ public abstract class ResourceDetailsDaoBase extends G sc.setParameters("resourceId", resourceId); sc.setParameters("display", forDisplay); - List results = search(sc, null); - return results; + return search(sc, null); } @Override @@ -215,4 +237,13 @@ public abstract class ResourceDetailsDaoBase extends G sc.setParameters("ids", ids.toArray()); return batchExpunge(sc, batchSize); } + + @Override + public String getActualValue(ResourceDetail resourceDetail) { + ConfigurationVO configurationVO = configDao.findByName(resourceDetail.getName()); + if (configurationVO != null && configurationVO.isEncrypted()) { + return DBEncryptionUtil.decrypt(resourceDetail.getValue()); + } + return resourceDetail.getValue(); + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDetailsDaoImpl.java 
b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDetailsDaoImpl.java index 14830490600..ec40dc0dd68 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDetailsDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDetailsDaoImpl.java @@ -20,6 +20,8 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import javax.inject.Inject; + import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.ConfigKey.Scope; @@ -27,6 +29,8 @@ import org.apache.cloudstack.framework.config.ScopedConfigStorage; import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; import org.springframework.stereotype.Component; +import com.cloud.storage.ImageStore; +import com.cloud.utils.Pair; import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchBuilder; @@ -36,6 +40,8 @@ import com.cloud.utils.db.TransactionLegacy; @Component public class ImageStoreDetailsDaoImpl extends ResourceDetailsDaoBase implements ImageStoreDetailsDao, ScopedConfigStorage { + @Inject + ImageStoreDao imageStoreDao; protected final SearchBuilder storeSearch; @@ -67,7 +73,7 @@ public class ImageStoreDetailsDaoImpl extends ResourceDetailsDaoBase details = listBy(sc); - Map detailsMap = new HashMap(); + Map detailsMap = new HashMap<>(); for (ImageStoreDetailVO detail : details) { String name = detail.getName(); String value = detail.getValue(); @@ -110,9 +116,24 @@ public class ImageStoreDetailsDaoImpl extends ResourceDetailsDaoBase key) { + ImageStoreDetailVO vo = findDetail(id, key.key()); + return vo == null ? 
null : getActualValue(vo); + } + @Override public void addDetail(long resourceId, String key, String value, boolean display) { super.addDetail(new ImageStoreDetailVO(resourceId, key, value, display)); } + @Override + public Pair getParentScope(long id) { + ImageStore store = imageStoreDao.findById(id); + if (store == null) { + return null; + } + return new Pair<>(getScope().getParent(), store.getDataCenterId()); + } + } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java index 1658fe0a537..cb7313954dc 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java @@ -28,20 +28,20 @@ import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.storage.Storage; -import com.cloud.utils.Pair; -import com.cloud.utils.db.Filter; import org.apache.commons.collections.CollectionUtils; import com.cloud.host.Status; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.ScopeType; +import com.cloud.storage.Storage; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.StoragePoolStatus; import com.cloud.storage.StoragePoolTagVO; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.StoragePoolTagsDao; +import com.cloud.utils.Pair; import com.cloud.utils.db.DB; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.JoinBuilder; @@ -755,7 +755,7 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase if (keyword != null) { SearchCriteria ssc = createSearchCriteria(); ssc.addOr("name", SearchCriteria.Op.LIKE, "%" + keyword + "%"); - 
ssc.addOr("poolType", SearchCriteria.Op.LIKE, new Storage.StoragePoolType("%" + keyword + "%")); + ssc.addOr("poolType", SearchCriteria.Op.LIKE, "%" + keyword + "%"); sc.addAnd("name", SearchCriteria.Op.SC, ssc); } diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_update_api_permission.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_update_api_permission.sql new file mode 100644 index 00000000000..c53e0067061 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_update_api_permission.sql @@ -0,0 +1,52 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. 
+ +DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`; + +CREATE PROCEDURE `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION` ( + IN role VARCHAR(255), + IN rule VARCHAR(255), + IN permission VARCHAR(255) +) +BEGIN + DECLARE role_id BIGINT(20) UNSIGNED +; DECLARE max_sort_order BIGINT(20) UNSIGNED + +; SELECT `r`.`id` INTO role_id + FROM `cloud`.`roles` `r` + WHERE `r`.`name` = role + AND `r`.`is_default` = 1 + +; SELECT MAX(`rp`.`sort_order`) INTO max_sort_order + FROM `cloud`.`role_permissions` `rp` + WHERE `rp`.`role_id` = role_id + +; IF NOT EXISTS ( + SELECT * FROM `cloud`.`role_permissions` `rp` + WHERE `rp`.`role_id` = role_id + AND `rp`.`rule` = rule + ) THEN + UPDATE `cloud`.`role_permissions` `rp` + SET `rp`.`sort_order` = max_sort_order + 1 + WHERE `rp`.`sort_order` = max_sort_order + AND `rp`.`role_id` = role_id + +; INSERT INTO `cloud`.`role_permissions` + (uuid, role_id, rule, permission, sort_order) + VALUES (uuid(), role_id, rule, permission, max_sort_order) +; END IF +;END; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41910to41920.sql b/engine/schema/src/main/resources/META-INF/db/schema-41910to41920.sql index 2ce8ea99bd1..12ead739d84 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41910to41920.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41910to41920.sql @@ -21,3 +21,25 @@ -- Add last_id to the volumes table CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.volumes', 'last_id', 'bigint(20) unsigned DEFAULT NULL'); + +-- Grant access to 2FA APIs for the "Read-Only User - Default" role + +CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Read-Only User - Default', 'setupUserTwoFactorAuthentication', 'ALLOW'); +CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Read-Only User - Default', 'validateUserTwoFactorAuthenticationCode', 'ALLOW'); +CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Read-Only User - Default', 'listUserTwoFactorAuthenticatorProviders', 'ALLOW'); + +-- Grant access to 2FA APIs 
for the "Support User - Default" role + +CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Support User - Default', 'setupUserTwoFactorAuthentication', 'ALLOW'); +CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Support User - Default', 'validateUserTwoFactorAuthenticationCode', 'ALLOW'); +CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Support User - Default', 'listUserTwoFactorAuthenticatorProviders', 'ALLOW'); + +-- Grant access to 2FA APIs for the "Read-Only Admin - Default" role + +CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Read-Only Admin - Default', 'setupUserTwoFactorAuthentication', 'ALLOW'); +CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Read-Only Admin - Default', 'validateUserTwoFactorAuthenticationCode', 'ALLOW'); + +-- Grant access to 2FA APIs for the "Support Admin - Default" role + +CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Support Admin - Default', 'setupUserTwoFactorAuthentication', 'ALLOW'); +CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Support Admin - Default', 'validateUserTwoFactorAuthenticationCode', 'ALLOW'); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42000to42010.sql b/engine/schema/src/main/resources/META-INF/db/schema-42000to42010.sql index 976ef217832..bf13e5eee1a 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-42000to42010.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-42000to42010.sql @@ -22,6 +22,15 @@ -- Add column api_key_access to user and account tables CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.user', 'api_key_access', 'boolean DEFAULT NULL COMMENT "is api key access allowed for the user" AFTER `secret_key`'); CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.account', 'api_key_access', 'boolean DEFAULT NULL COMMENT "is api key access allowed for the account" '); +CALL `cloud_usage`.`IDEMPOTENT_ADD_COLUMN`('cloud_usage.account', 'api_key_access', 'boolean DEFAULT NULL COMMENT "is api key access allowed for the account" '); + +-- Create a new group for Usage 
Server related configurations +INSERT INTO `cloud`.`configuration_group` (`name`, `description`, `precedence`) VALUES ('Usage Server', 'Usage Server related configuration', 9); +UPDATE `cloud`.`configuration_subgroup` set `group_id` = (SELECT `id` FROM `cloud`.`configuration_group` WHERE `name` = 'Usage Server'), `precedence` = 1 WHERE `name`='Usage'; +UPDATE `cloud`.`configuration` SET `group_id` = (SELECT `id` FROM `cloud`.`configuration_group` WHERE `name` = 'Usage Server') where `subgroup_id` = (SELECT `id` FROM `cloud`.`configuration_subgroup` WHERE `name` = 'Usage'); + +-- Update the description to indicate this setting applies only to volume snapshots on running instances +UPDATE `cloud`.`configuration` SET `description`='whether volume snapshot is enabled on running instances on KVM hosts' WHERE `name`='kvm.snapshot.enabled'; -- Modify index for mshost_peer DELETE FROM `cloud`.`mshost_peer`; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql b/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql index 9199b855055..7fb45de7399 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql @@ -19,6 +19,10 @@ -- Schema upgrade from 4.20.1.0 to 4.21.0.0 --; +-- Add columns max_backup and backup_interval_type to backup table +ALTER TABLE `cloud`.`backup_schedule` ADD COLUMN `max_backups` int(8) default NULL COMMENT 'maximum number of backups to maintain'; +ALTER TABLE `cloud`.`backups` ADD COLUMN `backup_interval_type` int(5) COMMENT 'type of backup, e.g. 
manual, recurring - hourly, daily, weekly or monthly'; + -- Add console_endpoint_creator_address column to cloud.console_session table CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.console_session', 'console_endpoint_creator_address', 'VARCHAR(45)'); diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.account_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.account_view.sql index dc64380fb57..6092fe8e845 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.account_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.account_view.sql @@ -68,6 +68,14 @@ select `primary_storage_count`.`count` AS `primaryStorageTotal`, `secondary_storage_limit`.`max` AS `secondaryStorageLimit`, `secondary_storage_count`.`count` AS `secondaryStorageTotal`, + `backup_limit`.`max` AS `backupLimit`, + `backup_count`.`count` AS `backupTotal`, + `backup_storage_limit`.`max` AS `backupStorageLimit`, + `backup_storage_count`.`count` AS `backupStorageTotal`, + `bucket_limit`.`max` AS `bucketLimit`, + `bucket_count`.`count` AS `bucketTotal`, + `object_storage_limit`.`max` AS `objectStorageLimit`, + `object_storage_count`.`count` AS `objectStorageTotal`, `async_job`.`id` AS `job_id`, `async_job`.`uuid` AS `job_uuid`, `async_job`.`job_status` AS `job_status`, @@ -160,6 +168,30 @@ from `cloud`.`resource_count` secondary_storage_count ON account.id = secondary_storage_count.account_id and secondary_storage_count.type = 'secondary_storage' left join + `cloud`.`resource_limit` backup_limit ON account.id = backup_limit.account_id + and backup_limit.type = 'backup' + left join + `cloud`.`resource_count` backup_count ON account.id = backup_count.account_id + and backup_count.type = 'backup' + left join + `cloud`.`resource_limit` backup_storage_limit ON account.id = backup_storage_limit.account_id + and backup_storage_limit.type = 'backup_storage' + left join + `cloud`.`resource_count` backup_storage_count ON account.id = 
backup_storage_count.account_id + and backup_storage_count.type = 'backup_storage' + left join + `cloud`.`resource_limit` bucket_limit ON account.id = bucket_limit.account_id + and bucket_limit.type = 'bucket' + left join + `cloud`.`resource_count` bucket_count ON account.id = bucket_count.account_id + and bucket_count.type = 'bucket' + left join + `cloud`.`resource_limit` object_storage_limit ON account.id = object_storage_limit.account_id + and object_storage_limit.type = 'object_storage' + left join + `cloud`.`resource_count` object_storage_count ON account.id = object_storage_count.account_id + and object_storage_count.type = 'object_storage' + left join `cloud`.`async_job` ON async_job.instance_id = account.id and async_job.instance_type = 'Account' and async_job.job_status = 0; diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.domain_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.domain_view.sql index 201ece95023..c9f7bfc51e4 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.domain_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.domain_view.sql @@ -58,7 +58,15 @@ select `primary_storage_limit`.`max` AS `primaryStorageLimit`, `primary_storage_count`.`count` AS `primaryStorageTotal`, `secondary_storage_limit`.`max` AS `secondaryStorageLimit`, - `secondary_storage_count`.`count` AS `secondaryStorageTotal` + `secondary_storage_count`.`count` AS `secondaryStorageTotal`, + `backup_limit`.`max` AS `backupLimit`, + `backup_count`.`count` AS `backupTotal`, + `backup_storage_limit`.`max` AS `backupStorageLimit`, + `backup_storage_count`.`count` AS `backupStorageTotal`, + `bucket_limit`.`max` AS `bucketLimit`, + `bucket_count`.`count` AS `bucketTotal`, + `object_storage_limit`.`max` AS `objectStorageLimit`, + `object_storage_count`.`count` AS `objectStorageTotal` from `cloud`.`domain` left join @@ -132,4 +140,28 @@ from and secondary_storage_limit.type = 'secondary_storage' left join 
`cloud`.`resource_count` secondary_storage_count ON domain.id = secondary_storage_count.domain_id - and secondary_storage_count.type = 'secondary_storage'; + and secondary_storage_count.type = 'secondary_storage' + left join + `cloud`.`resource_limit` backup_limit ON domain.id = backup_limit.domain_id + and backup_limit.type = 'backup' + left join + `cloud`.`resource_count` backup_count ON domain.id = backup_count.domain_id + and backup_count.type = 'backup' + left join + `cloud`.`resource_limit` backup_storage_limit ON domain.id = backup_storage_limit.domain_id + and backup_storage_limit.type = 'backup_storage' + left join + `cloud`.`resource_count` backup_storage_count ON domain.id = backup_storage_count.domain_id + and backup_storage_count.type = 'backup_storage' + left join + `cloud`.`resource_limit` bucket_limit ON domain.id = bucket_limit.domain_id + and bucket_limit.type = 'bucket' + left join + `cloud`.`resource_count` bucket_count ON domain.id = bucket_count.domain_id + and bucket_count.type = 'bucket' + left join + `cloud`.`resource_limit` object_storage_limit ON domain.id = object_storage_limit.domain_id + and object_storage_limit.type = 'object_storage' + left join + `cloud`.`resource_count` object_storage_count ON domain.id = object_storage_count.domain_id + and object_storage_count.type = 'object_storage'; diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.network_offering_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.network_offering_view.sql index b6abaabcd48..640b2397a46 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.network_offering_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.network_offering_view.sql @@ -76,13 +76,9 @@ SELECT FROM `cloud`.`network_offerings` LEFT JOIN - `cloud`.`network_offering_details` AS `domain_details` ON `domain_details`.`network_offering_id` = `network_offerings`.`id` AND `domain_details`.`name`='domainid' + `cloud`.`domain` AS 
`domain` ON `domain`.id IN (SELECT value from `network_offering_details` where `name` = 'domainid' and `network_offering_id` = `network_offerings`.`id`) LEFT JOIN - `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`) - LEFT JOIN - `cloud`.`network_offering_details` AS `zone_details` ON `zone_details`.`network_offering_id` = `network_offerings`.`id` AND `zone_details`.`name`='zoneid' - LEFT JOIN - `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`) + `cloud`.`data_center` AS `zone` ON `zone`.`id` IN (SELECT value from `network_offering_details` where `name` = 'zoneid' and `network_offering_id` = `network_offerings`.`id`) LEFT JOIN `cloud`.`network_offering_details` AS `offering_details` ON `offering_details`.`network_offering_id` = `network_offerings`.`id` AND `offering_details`.`name`='internetProtocol' GROUP BY diff --git a/engine/schema/src/test/java/com/cloud/capacity/dao/CapacityDaoImplTest.java b/engine/schema/src/test/java/com/cloud/capacity/dao/CapacityDaoImplTest.java new file mode 100644 index 00000000000..76c1092546a --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/capacity/dao/CapacityDaoImplTest.java @@ -0,0 +1,99 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.capacity.dao; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.capacity.CapacityVO; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class CapacityDaoImplTest { + @Spy + @InjectMocks + CapacityDaoImpl capacityDao = new CapacityDaoImpl(); + + private SearchBuilder searchBuilder; + private SearchCriteria searchCriteria; + + @Before + public void setUp() { + searchBuilder = mock(SearchBuilder.class); + CapacityVO capacityVO = mock(CapacityVO.class); + when(searchBuilder.entity()).thenReturn(capacityVO); + searchCriteria = mock(SearchCriteria.class); + doReturn(searchBuilder).when(capacityDao).createSearchBuilder(); + when(searchBuilder.create()).thenReturn(searchCriteria); + } + + @Test + public void testListByHostIdTypes() { + // Prepare inputs + Long hostId = 1L; + List capacityTypes = Arrays.asList((short)1, (short)2); + CapacityVO capacity1 = new CapacityVO(); + CapacityVO capacity2 = new CapacityVO(); + List mockResult = Arrays.asList(capacity1, capacity2); + doReturn(mockResult).when(capacityDao).listBy(any(SearchCriteria.class)); + List result = capacityDao.listByHostIdTypes(hostId, capacityTypes); + 
verify(searchBuilder).and(eq("hostId"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchBuilder).and(eq("type"), any(), eq(SearchCriteria.Op.IN)); + verify(searchBuilder).done(); + verify(searchCriteria).setParameters("hostId", hostId); + verify(searchCriteria).setParameters("type", capacityTypes.toArray()); + verify(capacityDao).listBy(searchCriteria); + assertEquals(2, result.size()); + assertSame(capacity1, result.get(0)); + assertSame(capacity2, result.get(1)); + } + + @Test + public void testListByHostIdTypesEmptyResult() { + Long hostId = 1L; + List capacityTypes = Arrays.asList((short)1, (short)2); + doReturn(Collections.emptyList()).when(capacityDao).listBy(any(SearchCriteria.class)); + List result = capacityDao.listByHostIdTypes(hostId, capacityTypes); + verify(searchBuilder).and(Mockito.eq("hostId"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchBuilder).and(eq("type"), any(), eq(SearchCriteria.Op.IN)); + verify(searchBuilder).done(); + verify(searchCriteria).setParameters("hostId", hostId); + verify(searchCriteria).setParameters("type", capacityTypes.toArray()); + verify(capacityDao).listBy(searchCriteria); + assertTrue(result.isEmpty()); + } +} diff --git a/engine/schema/src/test/java/com/cloud/dc/dao/ClusterDaoImplTest.java b/engine/schema/src/test/java/com/cloud/dc/dao/ClusterDaoImplTest.java new file mode 100644 index 00000000000..a513809be05 --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/dc/dao/ClusterDaoImplTest.java @@ -0,0 +1,78 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.dc.dao; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.isNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.dc.ClusterVO; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.SearchBuilder; + +@RunWith(MockitoJUnitRunner.class) +public class ClusterDaoImplTest { + @Spy + @InjectMocks + ClusterDaoImpl clusterDao = new ClusterDaoImpl(); + + private GenericSearchBuilder genericSearchBuilder; + + @Before + public void setUp() { + genericSearchBuilder = mock(SearchBuilder.class); + ClusterVO entityVO = mock(ClusterVO.class); + when(genericSearchBuilder.entity()).thenReturn(entityVO); + doReturn(genericSearchBuilder).when(clusterDao).createSearchBuilder(Long.class); + } + + @Test + public void testListAllIds() { + List mockIds = Arrays.asList(1L, 2L, 3L); + doReturn(mockIds).when(clusterDao).customSearch(any(), isNull()); + List result = clusterDao.listAllIds(); + verify(clusterDao).customSearch(genericSearchBuilder.create(), null); + assertEquals(3, result.size()); + 
assertEquals(Long.valueOf(1L), result.get(0)); + assertEquals(Long.valueOf(2L), result.get(1)); + assertEquals(Long.valueOf(3L), result.get(2)); + } + + @Test + public void testListAllIdsEmptyResult() { + doReturn(Collections.emptyList()).when(clusterDao).customSearch(any(), isNull()); + List result = clusterDao.listAllIds(); + verify(clusterDao).customSearch(genericSearchBuilder.create(), null); + assertTrue(result.isEmpty()); + } +} diff --git a/engine/schema/src/test/java/com/cloud/host/dao/HostDaoImplTest.java b/engine/schema/src/test/java/com/cloud/host/dao/HostDaoImplTest.java new file mode 100644 index 00000000000..81163321c6b --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/host/dao/HostDaoImplTest.java @@ -0,0 +1,184 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.host.dao; + +import java.util.List; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.resource.ResourceState; +import com.cloud.utils.Pair; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class HostDaoImplTest { + + @Spy + HostDaoImpl hostDao = new HostDaoImpl(); + + @Mock + private SearchBuilder mockSearchBuilder; + @Mock + private SearchCriteria mockSearchCriteria; + + @Test + public void testCountUpAndEnabledHostsInZone() { + long testZoneId = 100L; + hostDao.HostTypeCountSearch = mockSearchBuilder; + Mockito.when(mockSearchBuilder.create()).thenReturn(mockSearchCriteria); + Mockito.doNothing().when(mockSearchCriteria).setParameters(Mockito.anyString(), Mockito.any()); + int expected = 5; + Mockito.doReturn(expected).when(hostDao).getCount(mockSearchCriteria); + Integer count = hostDao.countUpAndEnabledHostsInZone(testZoneId); + Assert.assertSame(expected, count); + Mockito.verify(mockSearchCriteria).setParameters("type", Host.Type.Routing); + Mockito.verify(mockSearchCriteria).setParameters("resourceState", ResourceState.Enabled); + Mockito.verify(mockSearchCriteria).setParameters("zoneId", testZoneId); + Mockito.verify(hostDao).getCount(mockSearchCriteria); + } + + @Test + public void testCountAllHostsAndCPUSocketsByType() { + Host.Type type = Host.Type.Routing; + GenericDaoBase.SumCount mockSumCount = new GenericDaoBase.SumCount(); + mockSumCount.count = 10; + mockSumCount.sum = 20; + HostVO host = Mockito.mock(HostVO.class); + GenericSearchBuilder sb = 
Mockito.mock(GenericSearchBuilder.class); + Mockito.when(sb.entity()).thenReturn(host); + Mockito.doReturn(sb).when(hostDao).createSearchBuilder(GenericDaoBase.SumCount.class); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doReturn(List.of(mockSumCount)).when(hostDao).customSearch(Mockito.any(SearchCriteria.class), Mockito.any()); + Pair result = hostDao.countAllHostsAndCPUSocketsByType(type); + Assert.assertEquals(10, result.first().intValue()); + Assert.assertEquals(20, result.second().intValue()); + Mockito.verify(sc).setParameters("type", type); + } + + @Test + public void testIsHostUp() { + long testHostId = 101L; + List statuses = List.of(Status.Up); + HostVO host = Mockito.mock(HostVO.class); + GenericSearchBuilder sb = Mockito.mock(GenericSearchBuilder.class); + Mockito.when(sb.entity()).thenReturn(host); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doReturn(sb).when(hostDao).createSearchBuilder(Status.class); + Mockito.doReturn(statuses).when(hostDao).customSearch(Mockito.any(SearchCriteria.class), Mockito.any()); + boolean result = hostDao.isHostUp(testHostId); + Assert.assertTrue("Host should be up", result); + Mockito.verify(sc).setParameters("id", testHostId); + Mockito.verify(hostDao).customSearch(sc, null); + } + + @Test + public void testFindHostIdsByZoneClusterResourceStateTypeAndHypervisorType() { + Long zoneId = 1L; + Long clusterId = 2L; + List resourceStates = List.of(ResourceState.Enabled); + List types = List.of(Host.Type.Routing); + List hypervisorTypes = List.of(Hypervisor.HypervisorType.KVM); + List mockResults = List.of(1001L, 1002L); // Mocked result + HostVO host = Mockito.mock(HostVO.class); + GenericSearchBuilder sb = Mockito.mock(GenericSearchBuilder.class); + Mockito.when(sb.entity()).thenReturn(host); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); 
+ Mockito.when(sb.and()).thenReturn(sb); + Mockito.doReturn(sb).when(hostDao).createSearchBuilder(Long.class); + Mockito.doReturn(mockResults).when(hostDao).customSearch(Mockito.any(SearchCriteria.class), Mockito.any()); + List hostIds = hostDao.findHostIdsByZoneClusterResourceStateTypeAndHypervisorType( + zoneId, clusterId, resourceStates, types, hypervisorTypes); + Assert.assertEquals(mockResults, hostIds); + Mockito.verify(sc).setParameters("zoneId", zoneId); + Mockito.verify(sc).setParameters("clusterId", clusterId); + Mockito.verify(sc).setParameters("resourceState", resourceStates.toArray()); + Mockito.verify(sc).setParameters("type", types.toArray()); + Mockito.verify(sc).setParameters("hypervisorTypes", hypervisorTypes.toArray()); + } + + @Test + public void testListDistinctHypervisorTypes() { + Long zoneId = 1L; + List mockResults = List.of(Hypervisor.HypervisorType.KVM, Hypervisor.HypervisorType.XenServer); + HostVO host = Mockito.mock(HostVO.class); + GenericSearchBuilder sb = Mockito.mock(GenericSearchBuilder.class); + Mockito.when(sb.entity()).thenReturn(host); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doReturn(sb).when(hostDao).createSearchBuilder(Hypervisor.HypervisorType.class); + Mockito.doReturn(mockResults).when(hostDao).customSearch(Mockito.any(SearchCriteria.class), Mockito.any()); + List hypervisorTypes = hostDao.listDistinctHypervisorTypes(zoneId); + Assert.assertEquals(mockResults, hypervisorTypes); + Mockito.verify(sc).setParameters("zoneId", zoneId); + Mockito.verify(sc).setParameters("type", Host.Type.Routing); + } + + @Test + public void testListByIds() { + List ids = List.of(101L, 102L); + List mockResults = List.of(Mockito.mock(HostVO.class), Mockito.mock(HostVO.class)); + hostDao.IdsSearch = mockSearchBuilder; + Mockito.when(mockSearchBuilder.create()).thenReturn(mockSearchCriteria); + 
Mockito.doReturn(mockResults).when(hostDao).search(Mockito.any(SearchCriteria.class), Mockito.any()); + List hosts = hostDao.listByIds(ids); + Assert.assertEquals(mockResults, hosts); + Mockito.verify(mockSearchCriteria).setParameters("id", ids.toArray()); + Mockito.verify(hostDao).search(mockSearchCriteria, null); + } + + @Test + public void testListIdsBy() { + Host.Type type = Host.Type.Routing; + Status status = Status.Up; + ResourceState resourceState = ResourceState.Enabled; + Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.KVM; + Long zoneId = 1L, podId = 2L, clusterId = 3L; + List mockResults = List.of(1001L, 1002L); + HostVO host = Mockito.mock(HostVO.class); + GenericSearchBuilder sb = Mockito.mock(GenericSearchBuilder.class); + Mockito.when(sb.entity()).thenReturn(host); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doReturn(sb).when(hostDao).createSearchBuilder(Long.class); + Mockito.doReturn(mockResults).when(hostDao).customSearch(Mockito.any(SearchCriteria.class), Mockito.any()); + List hostIds = hostDao.listIdsBy(type, status, resourceState, hypervisorType, zoneId, podId, clusterId); + Assert.assertEquals(mockResults, hostIds); + Mockito.verify(sc).setParameters("type", type); + Mockito.verify(sc).setParameters("status", status); + Mockito.verify(sc).setParameters("resourceState", resourceState); + Mockito.verify(sc).setParameters("hypervisorType", hypervisorType); + Mockito.verify(sc).setParameters("zoneId", zoneId); + Mockito.verify(sc).setParameters("podId", podId); + Mockito.verify(sc).setParameters("clusterId", clusterId); + } +} diff --git a/engine/schema/src/test/java/com/cloud/upgrade/ConfigurationGroupsAggregatorTest.java b/engine/schema/src/test/java/com/cloud/upgrade/ConfigurationGroupsAggregatorTest.java new file mode 100644 index 00000000000..bab36ef00cf --- /dev/null +++ 
b/engine/schema/src/test/java/com/cloud/upgrade/ConfigurationGroupsAggregatorTest.java @@ -0,0 +1,76 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.upgrade; + +import static org.mockito.Mockito.when; + +import java.util.Collections; + +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.config.dao.ConfigurationGroupDao; +import org.apache.cloudstack.framework.config.dao.ConfigurationSubGroupDao; +import org.apache.cloudstack.framework.config.impl.ConfigurationSubGroupVO; +import org.apache.cloudstack.framework.config.impl.ConfigurationVO; +import org.apache.logging.log4j.Logger; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class ConfigurationGroupsAggregatorTest { + @InjectMocks + private ConfigurationGroupsAggregator configurationGroupsAggregator = new ConfigurationGroupsAggregator(); + + @Mock + private ConfigurationDao configDao; + + @Mock + private ConfigurationGroupDao configGroupDao; + + @Mock + private 
ConfigurationSubGroupDao configSubGroupDao; + + @Mock + private Logger logger; + + @Test + public void testUpdateConfigurationGroups() { + ConfigurationVO config = new ConfigurationVO("Advanced", "DEFAULT", "management-server", + "test.config.name", null, "description"); + config.setGroupId(1L); + config.setSubGroupId(1L); + + when(configDao.searchPartialConfigurations()).thenReturn(Collections.singletonList(config)); + + ConfigurationSubGroupVO configSubGroup = Mockito.mock(ConfigurationSubGroupVO.class); + when(configSubGroupDao.findByName("name")).thenReturn(configSubGroup); + Mockito.when(configSubGroup.getId()).thenReturn(10L); + Mockito.when(configSubGroup.getGroupId()).thenReturn(5L); + + configurationGroupsAggregator.updateConfigurationGroups(); + + Assert.assertEquals(Long.valueOf(5), config.getGroupId()); + Assert.assertEquals(Long.valueOf(10), config.getSubGroupId()); + Mockito.verify(configDao, Mockito.times(1)).persist(config); + Mockito.verify(logger, Mockito.times(1)).debug("Updating configuration groups"); + Mockito.verify(logger, Mockito.times(1)).debug("Successfully updated configuration groups."); + } +} diff --git a/engine/schema/src/test/java/com/cloud/upgrade/dao/DatabaseAccessObjectTest.java b/engine/schema/src/test/java/com/cloud/upgrade/dao/DatabaseAccessObjectTest.java index 4c07abda938..0c5a99ca05f 100644 --- a/engine/schema/src/test/java/com/cloud/upgrade/dao/DatabaseAccessObjectTest.java +++ b/engine/schema/src/test/java/com/cloud/upgrade/dao/DatabaseAccessObjectTest.java @@ -511,4 +511,57 @@ public class DatabaseAccessObjectTest { verify(loggerMock, times(1)).warn(anyString(), eq(sqlException)); } + @Test + public void testGetColumnType() throws Exception { + when(connectionMock.prepareStatement(contains("DESCRIBE"))).thenReturn(preparedStatementMock); + when(preparedStatementMock.executeQuery()).thenReturn(resultSetMock); + when(resultSetMock.next()).thenReturn(true); + when(resultSetMock.getString("Type")).thenReturn("type"); + + 
Connection conn = connectionMock; + String tableName = "tableName"; + String columnName = "columnName"; + + Assert.assertEquals("type", dao.getColumnType(conn, tableName, columnName)); + + verify(connectionMock, times(1)).prepareStatement(anyString()); + verify(preparedStatementMock, times(1)).executeQuery(); + verify(preparedStatementMock, times(1)).close(); + verify(loggerMock, times(0)).debug(anyString()); + } + + @Test + public void testAddColumn() throws Exception { + when(connectionMock.prepareStatement(contains("ADD COLUMN"))).thenReturn(preparedStatementMock); + when(preparedStatementMock.executeUpdate()).thenReturn(1); + + Connection conn = connectionMock; + String tableName = "tableName"; + String columnName = "columnName"; + String columnType = "columnType"; + + dao.addColumn(conn, tableName, columnName, columnType); + + verify(connectionMock, times(1)).prepareStatement(anyString()); + verify(preparedStatementMock, times(1)).executeUpdate(); + verify(preparedStatementMock, times(1)).close(); + } + + @Test + public void testChangeColumn() throws Exception { + when(connectionMock.prepareStatement(contains("CHANGE COLUMN"))).thenReturn(preparedStatementMock); + when(preparedStatementMock.executeUpdate()).thenReturn(1); + + Connection conn = connectionMock; + String tableName = "tableName"; + String columnName = "columnName"; + String newColumnName = "columnName2"; + String columnDefinition = "columnDefinition"; + + dao.changeColumn(conn, tableName, columnName, newColumnName, columnDefinition); + + verify(connectionMock, times(1)).prepareStatement(anyString()); + verify(preparedStatementMock, times(1)).executeUpdate(); + verify(preparedStatementMock, times(1)).close(); + } } diff --git a/engine/schema/src/test/java/com/cloud/upgrade/dao/DbUpgradeUtilsTest.java b/engine/schema/src/test/java/com/cloud/upgrade/dao/DbUpgradeUtilsTest.java index 1b775406466..d892b172c10 100644 --- a/engine/schema/src/test/java/com/cloud/upgrade/dao/DbUpgradeUtilsTest.java +++ 
b/engine/schema/src/test/java/com/cloud/upgrade/dao/DbUpgradeUtilsTest.java @@ -159,4 +159,33 @@ public class DbUpgradeUtilsTest { verify(daoMock, times(1)).columnExists(conn, tableName, column3); verify(daoMock, times(1)).dropColumn(conn, tableName, column3); } + + @Test + public void testAddTableColumnIfNotExist() throws Exception { + Connection conn = connectionMock; + String tableName = "tableName"; + String columnName = "columnName"; + String columnDefinition = "columnDefinition"; + when(daoMock.columnExists(conn, tableName, columnName)).thenReturn(false); + + DbUpgradeUtils.addTableColumnIfNotExist(conn, tableName, columnName, columnDefinition); + + verify(daoMock, times(1)).columnExists(conn, tableName, columnName); + verify(daoMock, times(1)).addColumn(conn, tableName, columnName, columnDefinition); + } + + @Test + public void testChangeTableColumnIfNotExist() throws Exception { + Connection conn = connectionMock; + String tableName = "tableName"; + String oldColumnName = "oldColumnName"; + String newColumnName = "newColumnName"; + String columnDefinition = "columnDefinition"; + when(daoMock.columnExists(conn, tableName, oldColumnName)).thenReturn(true); + + DbUpgradeUtils.changeTableColumnIfNotExist(conn, tableName, oldColumnName, newColumnName, columnDefinition); + + verify(daoMock, times(1)).columnExists(conn, tableName, oldColumnName); + verify(daoMock, times(1)).changeColumn(conn, tableName, oldColumnName, newColumnName, columnDefinition); + } } diff --git a/engine/schema/src/test/java/com/cloud/upgrade/dao/Upgrade42010to42100Test.java b/engine/schema/src/test/java/com/cloud/upgrade/dao/Upgrade42010to42100Test.java new file mode 100644 index 00000000000..035790f0716 --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/upgrade/dao/Upgrade42010to42100Test.java @@ -0,0 +1,73 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.upgrade.dao; + +import static org.mockito.Mockito.when; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.utils.db.TransactionLegacy; + +@RunWith(MockitoJUnitRunner.class) +public class Upgrade42010to42100Test { + @Spy + Upgrade42010to42100 upgrade; + + @Mock + private Connection conn; + + @Test + public void testPerformDataMigration() throws SQLException { + try (MockedStatic ignored = Mockito.mockStatic(DbUpgradeUtils.class)) { + DbUpgradeUtils dbUpgradeUtils = Mockito.mock(DbUpgradeUtils.class); + when(dbUpgradeUtils.getTableColumnType(conn, "configuration", "scope")).thenReturn("varchar(255)"); + + try (MockedStatic ignored2 = Mockito.mockStatic(TransactionLegacy.class)) { + TransactionLegacy txn = Mockito.mock(TransactionLegacy.class); + when(TransactionLegacy.currentTxn()).thenReturn(txn); + PreparedStatement pstmt = Mockito.mock(PreparedStatement.class); + String sql = "UPDATE configuration\n" + + "SET new_scope =" + + " CASE" + + " WHEN scope = 'Global' THEN 1" + + 
" WHEN scope = 'Zone' THEN 2" + + " WHEN scope = 'Cluster' THEN 4" + + " WHEN scope = 'StoragePool' THEN 8" + + " WHEN scope = 'ManagementServer' THEN 16" + + " WHEN scope = 'ImageStore' THEN 32" + + " WHEN scope = 'Domain' THEN 64" + + " WHEN scope = 'Account' THEN 128" + + " ELSE 0" + + " END WHERE scope IS NOT NULL;"; + when(txn.prepareAutoCloseStatement(sql)).thenReturn(pstmt); + upgrade.performDataMigration(conn); + + Mockito.verify(pstmt, Mockito.times(1)).executeUpdate(); + } + } + } +} diff --git a/engine/schema/src/test/java/com/cloud/usage/dao/UsageStorageDaoImplTest.java b/engine/schema/src/test/java/com/cloud/usage/dao/UsageStorageDaoImplTest.java index 05d9154b6a4..fa47d2cd90b 100644 --- a/engine/schema/src/test/java/com/cloud/usage/dao/UsageStorageDaoImplTest.java +++ b/engine/schema/src/test/java/com/cloud/usage/dao/UsageStorageDaoImplTest.java @@ -23,12 +23,9 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.sql.PreparedStatement; -import com.cloud.utils.DateUtil; -import com.cloud.utils.db.TransactionLegacy; import java.util.Date; import java.util.TimeZone; -import com.cloud.usage.UsageStorageVO; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; @@ -36,6 +33,10 @@ import org.mockito.MockedStatic; import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; +import com.cloud.usage.UsageStorageVO; +import com.cloud.utils.DateUtil; +import com.cloud.utils.db.TransactionLegacy; + @RunWith(MockitoJUnitRunner.class) public class UsageStorageDaoImplTest { diff --git a/engine/schema/src/test/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBaseTest.java b/engine/schema/src/test/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBaseTest.java new file mode 100644 index 00000000000..4c54599c396 --- /dev/null +++ b/engine/schema/src/test/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBaseTest.java @@ -0,0 +1,181 @@ +// Licensed to the 
Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.resourcedetail; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.isNull; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +import org.apache.cloudstack.api.ResourceDetail; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class ResourceDetailsDaoBaseTest { + @Spy + @InjectMocks + 
TestDetailsDao testDetailsDao = new TestDetailsDao(); + + private SearchBuilder searchBuilder; + private SearchCriteria searchCriteria; + + @Before + public void setUp() { + searchBuilder = mock(SearchBuilder.class); + searchCriteria = mock(SearchCriteria.class); + TestDetailVO entityVO = mock(TestDetailVO.class); + when(searchBuilder.entity()).thenReturn(entityVO); + searchCriteria = mock(SearchCriteria.class); + doReturn(searchBuilder).when(testDetailsDao).createSearchBuilder(); + when(searchBuilder.create()).thenReturn(searchCriteria); + } + + @Test + public void testListDetailsKeyPairs() { + long resourceId = 1L; + List keys = Arrays.asList("key1", "key2"); + TestDetailVO result1 = mock(TestDetailVO.class); + when(result1.getName()).thenReturn("key1"); + when(result1.getValue()).thenReturn("value1"); + TestDetailVO result2 = mock(TestDetailVO.class); + when(result2.getName()).thenReturn("key2"); + when(result2.getValue()).thenReturn("value2"); + List mockResults = Arrays.asList(result1, result2); + doReturn(mockResults).when(testDetailsDao).search(any(SearchCriteria.class), isNull()); + Map result = testDetailsDao.listDetailsKeyPairs(resourceId, keys); + verify(searchBuilder).and(eq("resourceId"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchBuilder).and(eq("name"), any(), eq(SearchCriteria.Op.IN)); + verify(searchBuilder).done(); + verify(searchCriteria).setParameters("resourceId", resourceId); + verify(searchCriteria).setParameters("name", keys.toArray()); + verify(testDetailsDao).search(searchCriteria, null); + assertEquals(2, result.size()); + assertEquals("value1", result.get("key1")); + assertEquals("value2", result.get("key2")); + } + + @Test + public void testListDetailsKeyPairsEmptyResult() { + long resourceId = 1L; + List keys = Arrays.asList("key1", "key2"); + doReturn(Collections.emptyList()).when(testDetailsDao).search(any(SearchCriteria.class), isNull()); + Map result = testDetailsDao.listDetailsKeyPairs(resourceId, keys); + 
verify(searchBuilder).and(eq("resourceId"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchBuilder).and(eq("name"), any(), eq(SearchCriteria.Op.IN)); + verify(searchBuilder).done(); + verify(searchCriteria).setParameters("resourceId", resourceId); + verify(searchCriteria).setParameters("name", keys.toArray()); + verify(testDetailsDao).search(searchCriteria, null); + assertTrue(result.isEmpty()); + } + + protected static class TestDetailsDao extends ResourceDetailsDaoBase { + @Override + public void addDetail(long resourceId, String key, String value, boolean display) { + super.addDetail(new TestDetailVO(resourceId, key, value, display)); + } + } + + @Entity + @Table(name = "test_details") + protected static class TestDetailVO implements ResourceDetail { + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "resource_id") + private long resourceId; + + @Column(name = "name") + private String name; + + @Column(name = "value") + private String value; + + @Column(name = "display") + private boolean display = true; + + public TestDetailVO() { + } + + public TestDetailVO(long resourceId, String name, String value, boolean display) { + this.resourceId = resourceId; + this.name = name; + this.value = value; + this.display = display; + } + + @Override + public long getId() { + return id; + } + + @Override + public String getName() { + return name; + } + + @Override + public String getValue() { + return value; + } + + @Override + public long getResourceId() { + return resourceId; + } + + @Override + public boolean isDisplay() { + return display; + } + + public void setName(String name) { + this.name = name; + } + + public void setValue(String value) { + this.value = value; + } + } +} diff --git a/engine/schema/src/test/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImplTest.java b/engine/schema/src/test/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImplTest.java 
index bfcc38ba104..fc41a82e71d 100755 --- a/engine/schema/src/test/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImplTest.java +++ b/engine/schema/src/test/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImplTest.java @@ -17,12 +17,17 @@ package org.apache.cloudstack.storage.datastore.db; import static org.mockito.ArgumentMatchers.nullable; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.isNull; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import java.io.IOException; import java.sql.SQLException; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -34,13 +39,15 @@ import org.junit.runner.RunWith; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; import com.cloud.storage.ScopeType; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.StoragePoolTagsDao; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.SearchBuilder; import junit.framework.TestCase; -import org.mockito.junit.MockitoJUnitRunner; @RunWith(MockitoJUnitRunner.class) public class PrimaryDataStoreDaoImplTest extends TestCase { @@ -59,6 +66,8 @@ public class PrimaryDataStoreDaoImplTest extends TestCase { @Mock StoragePoolVO storagePoolVO; + private GenericSearchBuilder genericSearchBuilder; + private static final String STORAGE_TAG_1 = "NFS-A"; private static final String STORAGE_TAG_2 = "NFS-B"; private static final String[] STORAGE_TAGS_ARRAY = {STORAGE_TAG_1, STORAGE_TAG_2}; @@ -155,4 +164,32 @@ public class PrimaryDataStoreDaoImplTest extends TestCase { String expectedSql = primaryDataStoreDao.DetailsSqlPrefix + SQL_VALUES + primaryDataStoreDao.DetailsSqlSuffix; 
verify(primaryDataStoreDao).searchStoragePoolsPreparedStatement(expectedSql, DATACENTER_ID, POD_ID, CLUSTER_ID, SCOPE, STORAGE_POOL_DETAILS.size()); } + + @Test + public void testListAllIds() { + GenericSearchBuilder genericSearchBuilder = mock(SearchBuilder.class); + StoragePoolVO entityVO = mock(StoragePoolVO.class); + when(genericSearchBuilder.entity()).thenReturn(entityVO); + doReturn(genericSearchBuilder).when(primaryDataStoreDao).createSearchBuilder(Long.class); + List mockIds = Arrays.asList(1L, 2L, 3L); + doReturn(mockIds).when(primaryDataStoreDao).customSearch(any(), isNull()); + List result = primaryDataStoreDao.listAllIds(); + verify(primaryDataStoreDao).customSearch(genericSearchBuilder.create(), null); + assertEquals(3, result.size()); + assertEquals(Long.valueOf(1L), result.get(0)); + assertEquals(Long.valueOf(2L), result.get(1)); + assertEquals(Long.valueOf(3L), result.get(2)); + } + + @Test + public void testListAllIdsEmptyResult() { + GenericSearchBuilder genericSearchBuilder = mock(SearchBuilder.class); + StoragePoolVO entityVO = mock(StoragePoolVO.class); + when(genericSearchBuilder.entity()).thenReturn(entityVO); + doReturn(genericSearchBuilder).when(primaryDataStoreDao).createSearchBuilder(Long.class); + doReturn(Collections.emptyList()).when(primaryDataStoreDao).customSearch(any(), isNull()); + List result = primaryDataStoreDao.listAllIds(); + verify(primaryDataStoreDao).customSearch(genericSearchBuilder.create(), null); + assertTrue(result.isEmpty()); + } } diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index 3456731ef1c..2f1227a91a5 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ 
b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -40,6 +40,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; @@ -1534,6 +1535,16 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { verifyFormat(templateInfo.getFormat()); } + // this blurb handles the case where the storage system can clone a volume from a template + String canCloneVolumeFromTemplate = templateInfo.getDataStore().getDriver().getCapabilities().get("CAN_CLONE_VOLUME_FROM_TEMPLATE"); + if (canCloneVolumeFromTemplate != null && canCloneVolumeFromTemplate.toLowerCase().equals("true")) { + DataStoreDriver driver = templateInfo.getDataStore().getDriver(); + driver.createAsync(volumeInfo.getDataStore(), volumeInfo, null); + volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore()); + driver.copyAsync(templateInfo, volumeInfo, null); + return; + } + HostVO hostVO = null; final boolean computeClusterSupportsVolumeClone; @@ -1641,7 +1652,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { errMsg = "Create volume from template failed: " + ex.getMessage(); } - throw new CloudRuntimeException(errMsg); + throw new CloudRuntimeException(errMsg, ex); } finally { if (copyCmdAnswer == null) { @@ -2634,7 +2645,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { catch (Exception ex) { errMsg = 
ex.getMessage(); - throw new CloudRuntimeException(errMsg); + throw new CloudRuntimeException(errMsg, ex); } finally { if (copyCmdAnswer == null) { diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java index d2f08260aa3..0fedf746fa6 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java @@ -20,7 +20,6 @@ package org.apache.cloudstack.storage.image.manager; import java.util.ArrayList; import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -180,28 +179,14 @@ public class ImageStoreProviderManagerImpl implements ImageStoreProviderManager, @Override public DataStore getImageStoreWithFreeCapacity(List imageStores) { - if (imageStores.size() > 1) { - imageStores.sort(new Comparator() { // Sort data stores based on free capacity - @Override - public int compare(DataStore store1, DataStore store2) { - return Long.compare(_statsCollector.imageStoreCurrentFreeCapacity(store1), - _statsCollector.imageStoreCurrentFreeCapacity(store2)); - } - }); - for (DataStore imageStore : imageStores) { - // Return image store if used percentage is less then threshold value i.e. 90%. 
- if (_statsCollector.imageStoreHasEnoughCapacity(imageStore)) { - return imageStore; - } - } - } else if (imageStores.size() == 1) { - if (_statsCollector.imageStoreHasEnoughCapacity(imageStores.get(0))) { - return imageStores.get(0); + imageStores.sort((store1, store2) -> Long.compare(_statsCollector.imageStoreCurrentFreeCapacity(store2), + _statsCollector.imageStoreCurrentFreeCapacity(store1))); + for (DataStore imageStore : imageStores) { + if (_statsCollector.imageStoreHasEnoughCapacity(imageStore)) { + return imageStore; } } - - // No store with space found - logger.error(String.format("Can't find an image storage in zone with less than %d usage", + logger.error(String.format("Could not find an image storage in zone with less than %d usage", Math.round(_statsCollector.getImageStoreCapacityThreshold() * 100))); return null; } @@ -209,23 +194,11 @@ public class ImageStoreProviderManagerImpl implements ImageStoreProviderManager, @Override public List orderImageStoresOnFreeCapacity(List imageStores) { List stores = new ArrayList<>(); - if (imageStores.size() > 1) { - imageStores.sort(new Comparator() { // Sort data stores based on free capacity - @Override - public int compare(DataStore store1, DataStore store2) { - return Long.compare(_statsCollector.imageStoreCurrentFreeCapacity(store1), - _statsCollector.imageStoreCurrentFreeCapacity(store2)); - } - }); - for (DataStore imageStore : imageStores) { - // Return image store if used percentage is less then threshold value i.e. 90%. 
- if (_statsCollector.imageStoreHasEnoughCapacity(imageStore)) { - stores.add(imageStore); - } - } - } else if (imageStores.size() == 1) { - if (_statsCollector.imageStoreHasEnoughCapacity(imageStores.get(0))) { - stores.add(imageStores.get(0)); + imageStores.sort((store1, store2) -> Long.compare(_statsCollector.imageStoreCurrentFreeCapacity(store2), + _statsCollector.imageStoreCurrentFreeCapacity(store1))); + for (DataStore imageStore : imageStores) { + if (_statsCollector.imageStoreHasEnoughCapacity(imageStore)) { + stores.add(imageStore); } } return stores; diff --git a/engine/storage/image/src/test/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImplTest.java b/engine/storage/image/src/test/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImplTest.java index c0462034790..72acd65931a 100644 --- a/engine/storage/image/src/test/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImplTest.java +++ b/engine/storage/image/src/test/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImplTest.java @@ -16,6 +16,9 @@ // under the License. 
package org.apache.cloudstack.storage.image.manager; +import com.cloud.server.StatsCollector; +import com.cloud.utils.Pair; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.junit.Assert; @@ -26,14 +29,22 @@ import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + @RunWith(MockitoJUnitRunner.class) public class ImageStoreProviderManagerImplTest { @Mock ImageStoreDao imageStoreDao; + @Mock + StatsCollector statsCollectorMock; + @InjectMocks ImageStoreProviderManagerImpl imageStoreProviderManager = new ImageStoreProviderManagerImpl(); + @Test public void testGetImageStoreZoneId() { final long storeId = 1L; @@ -44,4 +55,56 @@ public class ImageStoreProviderManagerImplTest { long value = imageStoreProviderManager.getImageStoreZoneId(storeId); Assert.assertEquals(zoneId, value); } + + private Pair, List> prepareUnorderedAndOrderedImageStoresForCapacityTests(boolean hasStoragesWithEnoughCapacity) { + DataStore store1 = Mockito.mock(DataStore.class); + Mockito.doReturn(100L).when(statsCollectorMock).imageStoreCurrentFreeCapacity(store1); + Mockito.doReturn(false).when(statsCollectorMock).imageStoreHasEnoughCapacity(store1); + DataStore store2 = Mockito.mock(DataStore.class); + Mockito.doReturn(200L).when(statsCollectorMock).imageStoreCurrentFreeCapacity(store2); + Mockito.doReturn(hasStoragesWithEnoughCapacity).when(statsCollectorMock).imageStoreHasEnoughCapacity(store2); + DataStore store3 = Mockito.mock(DataStore.class); + Mockito.doReturn(300L).when(statsCollectorMock).imageStoreCurrentFreeCapacity(store3); + Mockito.doReturn(hasStoragesWithEnoughCapacity).when(statsCollectorMock).imageStoreHasEnoughCapacity(store3); + DataStore store4 = Mockito.mock(DataStore.class); + 
Mockito.doReturn(400L).when(statsCollectorMock).imageStoreCurrentFreeCapacity(store4); + Mockito.doReturn(false).when(statsCollectorMock).imageStoreHasEnoughCapacity(store4); + + List unordered = Arrays.asList(store1, store2, store3, store4); + List orderedAndEnoughCapacity = new ArrayList<>(); + if (hasStoragesWithEnoughCapacity) { + orderedAndEnoughCapacity.add(store3); + orderedAndEnoughCapacity.add(store2); + } + + return new Pair<>(unordered, orderedAndEnoughCapacity); + } + + @Test + public void getImageStoreWithFreeCapacityTestImageStoresWithEnoughCapacityExistReturnsImageStoreWithMostFreeCapacity() { + Pair, List> unorderedAndOrdered = prepareUnorderedAndOrderedImageStoresForCapacityTests(true); + + DataStore result = imageStoreProviderManager.getImageStoreWithFreeCapacity(unorderedAndOrdered.first()); + + Assert.assertEquals(unorderedAndOrdered.second().get(0), result); + } + + @Test + public void getImageStoreWithFreeCapacityTestImageStoresWithEnoughCapacityDoNotExistReturnsNull() { + Pair, List> unorderedAndOrdered = prepareUnorderedAndOrderedImageStoresForCapacityTests(false); + + DataStore result = imageStoreProviderManager.getImageStoreWithFreeCapacity(unorderedAndOrdered.first()); + + Assert.assertNull(result); + } + + @Test + public void orderImageStoresOnFreeCapacityTestReturnsImageStoresOrderedFromMostToLeast() { + Pair, List> unorderedAndOrdered = prepareUnorderedAndOrderedImageStoresForCapacityTests(true); + + List result = imageStoreProviderManager.orderImageStoresOnFreeCapacity(unorderedAndOrdered.first()); + + Assert.assertEquals(unorderedAndOrdered.second(), result); + } + } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java index 63524ccb6db..2c034d8429a 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java +++ 
b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java @@ -124,18 +124,24 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement protected List reorderPoolsByCapacity(DeploymentPlan plan, List pools) { Long zoneId = plan.getDataCenterId(); Long clusterId = plan.getClusterId(); - short capacityType; if (CollectionUtils.isEmpty(pools)) { return null; } - if (pools.get(0).getPoolType().isShared()) { + short capacityType = Capacity.CAPACITY_TYPE_LOCAL_STORAGE; + String storageType = "local"; + StoragePool storagePool = pools.get(0); + if (storagePool.isShared()) { capacityType = Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED; - } else { - capacityType = Capacity.CAPACITY_TYPE_LOCAL_STORAGE; + storageType = "shared"; } + logger.debug(String.format( + "Filtering storage pools by capacity type [%s] as the first storage pool of the list, with name [%s] and ID [%s], is a [%s] storage.", + capacityType, storagePool.getName(), storagePool.getUuid(), storageType + )); + List poolIdsByCapacity = capacityDao.orderHostsByFreeCapacity(zoneId, clusterId, capacityType); logger.debug(String.format("List of pools in descending order of available capacity [%s].", poolIdsByCapacity)); @@ -221,6 +227,8 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement } List reorderStoragePoolsBasedOnAlgorithm(List pools, DeploymentPlan plan, Account account) { + logger.debug(String.format("Using allocation algorithm [%s] to reorder pools.", allocationAlgorithm)); + if (allocationAlgorithm.equals("random") || allocationAlgorithm.equals("userconcentratedpod_random") || (account == null)) { reorderRandomPools(pools); } else if (StringUtils.equalsAny(allocationAlgorithm, "userdispersing", "firstfitleastconsumed")) { diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java 
b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index bf67be91108..26bef607c9b 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -622,7 +622,7 @@ public class VolumeServiceImpl implements VolumeService { try { Thread.sleep(sleepTime * 1000); } catch (InterruptedException e) { - logger.debug("waiting for template download been interrupted: " + e.toString()); + logger.debug("waiting for template download been interrupted: " + e); } tries--; } @@ -691,7 +691,6 @@ public class VolumeServiceImpl implements VolumeService { } _tmpltPoolDao.releaseFromLockTable(templatePoolRefId); } - return; } protected Void managedCopyBaseImageCallback(AsyncCallbackDispatcher callback, ManagedCreateBaseImageContext context) { @@ -1039,7 +1038,7 @@ public class VolumeServiceImpl implements VolumeService { try { grantAccess(templateOnPrimary, destHost, destPrimaryDataStore); } catch (Exception e) { - throw new StorageAccessException(String.format("Unable to grant access to template: %s on host: %s", templateOnPrimary.getImage(), destHost)); + throw new StorageAccessException(String.format("Unable to grant access to template: %s on host: %s", templateOnPrimary.getImage(), destHost), e); } templateOnPrimary.processEvent(Event.CopyingRequested); @@ -1161,7 +1160,7 @@ public class VolumeServiceImpl implements VolumeService { try { grantAccess(srcTemplateOnPrimary, destHost, destPrimaryDataStore); } catch (Exception e) { - throw new StorageAccessException(String.format("Unable to grant access to src template: %s on host: %s", srcTemplateOnPrimary, destHost)); + throw new StorageAccessException(String.format("Unable to grant access to src template: %s on host: %s", srcTemplateOnPrimary, destHost), e); } _volumeDetailsDao.addDetail(volumeInfo.getId(), volumeDetailKey, 
String.valueOf(templatePoolRef.getId()), false); @@ -1408,7 +1407,7 @@ public class VolumeServiceImpl implements VolumeService { try { grantAccess(templateOnPrimary, destHost, destPrimaryDataStore); } catch (Exception e) { - throw new StorageAccessException(String.format("Unable to grant access to template: %s on host: %s", templateOnPrimary, destHost)); + throw new StorageAccessException(String.format("Unable to grant access to template: %s on host: %s", templateOnPrimary, destHost), e); } templateOnPrimary.processEvent(Event.CopyingRequested); diff --git a/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBAlgorithm.java b/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBAlgorithm.java index c87a0996fcc..062d2226876 100644 --- a/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBAlgorithm.java +++ b/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBAlgorithm.java @@ -42,4 +42,8 @@ public interface IndirectAgentLBAlgorithm { * @return true if the lists are equal, false if not */ boolean compare(final List msList, final List receivedMsList); + + default boolean isHostListNeeded() { + return false; + } } diff --git a/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostPeerDaoImpl.java b/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostPeerDaoImpl.java index 158d4da2c5a..1eefd57d665 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostPeerDaoImpl.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostPeerDaoImpl.java @@ -121,8 +121,7 @@ public class ManagementServerHostPeerDaoImpl extends GenericDaoBase l = listBy(sc); - return l.size(); + return getCount(sc); } @Override diff --git a/framework/config/src/main/java/org/apache/cloudstack/config/Configuration.java b/framework/config/src/main/java/org/apache/cloudstack/config/Configuration.java 
index b93817a9919..d31d14586be 100644 --- a/framework/config/src/main/java/org/apache/cloudstack/config/Configuration.java +++ b/framework/config/src/main/java/org/apache/cloudstack/config/Configuration.java @@ -17,6 +17,9 @@ package org.apache.cloudstack.config; import java.util.Date; +import java.util.List; + +import org.apache.cloudstack.framework.config.ConfigKey; /** * Configuration represents one global configuration parameter for CloudStack. @@ -74,7 +77,9 @@ public interface Configuration { * always global. A non-null value indicates that this parameter can be * set at a certain organization level. */ - String getScope(); + int getScope(); + + List getScopes(); /** * @return can the configuration parameter be changed without restarting the server. diff --git a/framework/config/src/main/java/org/apache/cloudstack/framework/config/ConfigDepot.java b/framework/config/src/main/java/org/apache/cloudstack/framework/config/ConfigDepot.java index 5ee5f9dec48..12f3653b9b3 100644 --- a/framework/config/src/main/java/org/apache/cloudstack/framework/config/ConfigDepot.java +++ b/framework/config/src/main/java/org/apache/cloudstack/framework/config/ConfigDepot.java @@ -18,6 +18,8 @@ package org.apache.cloudstack.framework.config; import java.util.Set; +import com.cloud.utils.Pair; + /** * ConfigDepot is a repository of configurations. 
* @@ -34,4 +36,5 @@ public interface ConfigDepot { boolean isNewConfig(ConfigKey configKey); String getConfigStringValue(String key, ConfigKey.Scope scope, Long scopeId); void invalidateConfigCache(String key, ConfigKey.Scope scope, Long scopeId); + Pair getParentScope(ConfigKey.Scope scope, Long id); } diff --git a/framework/config/src/main/java/org/apache/cloudstack/framework/config/ConfigKey.java b/framework/config/src/main/java/org/apache/cloudstack/framework/config/ConfigKey.java index 00cf56345c8..26151ab5b58 100644 --- a/framework/config/src/main/java/org/apache/cloudstack/framework/config/ConfigKey.java +++ b/framework/config/src/main/java/org/apache/cloudstack/framework/config/ConfigKey.java @@ -17,8 +17,14 @@ package org.apache.cloudstack.framework.config; import java.sql.Date; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; import org.apache.cloudstack.framework.config.impl.ConfigDepotImpl; +import org.apache.commons.collections.CollectionUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; @@ -30,6 +36,7 @@ import com.cloud.utils.exception.CloudRuntimeException; * */ public class ConfigKey { + private static final Logger logger = LogManager.getLogger(ConfigKey.class); public static final String CATEGORY_ADVANCED = "Advanced"; public static final String CATEGORY_ALERT = "Alert"; @@ -37,7 +44,89 @@ public class ConfigKey { public static final String CATEGORY_SYSTEM = "System"; public enum Scope { - Global, Zone, Cluster, StoragePool, Account, ManagementServer, ImageStore, Domain + Global(null, 1), + Zone(Global, 1 << 1), + Cluster(Zone, 1 << 2), + StoragePool(Cluster, 1 << 3), + ManagementServer(Global, 1 << 4), + ImageStore(Zone, 1 << 5), + Domain(Global, 1 << 6), + Account(Domain, 1 << 7); + + private final Scope parent; + private final int bitValue; + + Scope(Scope parent, int bitValue) { + this.parent = parent; + 
this.bitValue = bitValue; + } + + public Scope getParent() { + return parent; + } + + public int getBitValue() { + return bitValue; + } + + public boolean isDescendantOf(Scope other) { + Scope parent = this.getParent(); + while (parent != null) { + if (parent == other) { + return true; + } + parent = parent.getParent(); + } + return false; + } + + public static List getAllDescendants(String str) { + Scope s1 = Scope.valueOf(str); + List scopes = new ArrayList<>(); + for (Scope s : Scope.values()) { + if (s.isDescendantOf(s1)) { + scopes.add(s); + } + } + return scopes; + } + + public static List decode(int bitmask) { + if (bitmask == 0) { + return Collections.emptyList(); + } + List scopes = new ArrayList<>(); + for (Scope scope : Scope.values()) { + if ((bitmask & scope.getBitValue()) != 0) { + scopes.add(scope); + } + } + return scopes; + } + + public static String decodeAsCsv(int bitmask) { + if (bitmask == 0) { + return null; + } + StringBuilder builder = new StringBuilder(); + for (Scope scope : Scope.values()) { + if ((bitmask & scope.getBitValue()) != 0) { + builder.append(scope.name()).append(", "); + } + } + if (builder.length() > 0) { + builder.setLength(builder.length() - 2); + } + return builder.toString(); + } + + public static int getBitmask(Scope... 
scopes) { + int bitmask = 0; + for (Scope scope : scopes) { + bitmask |= scope.getBitValue(); + } + return bitmask; + } } public enum Kind { @@ -70,8 +159,8 @@ public class ConfigKey { return _displayText; } - public Scope scope() { - return _scope; + public List getScopes() { + return scopes; } public boolean isDynamic() { @@ -108,7 +197,7 @@ public class ConfigKey { private final String _defaultValue; private final String _description; private final String _displayText; - private final Scope _scope; // Parameter can be at different levels (Zone/cluster/pool/account), by default every parameter is at global + private final List scopes; // Parameter can be at different levels (Zone/cluster/pool/account), by default every parameter is at global private final boolean _isDynamic; private final String _parent; private final Ternary _group; // Group name, description with precedence @@ -128,6 +217,10 @@ public class ConfigKey { this(type, name, category, defaultValue, description, isDynamic, scope, null); } + public ConfigKey(String category, Class type, String name, String defaultValue, String description, boolean isDynamic, List scopes) { + this(type, name, category, defaultValue, description, isDynamic, scopes, null); + } + public ConfigKey(String category, Class type, String name, String defaultValue, String description, boolean isDynamic, Scope scope, String parent) { this(type, name, category, defaultValue, description, isDynamic, scope, null, null, parent, null, null, null, null); } @@ -148,6 +241,10 @@ public class ConfigKey { this(type, name, category, defaultValue, description, isDynamic, scope, multiplier, null, null, null, null, null, null); } + public ConfigKey(Class type, String name, String category, String defaultValue, String description, boolean isDynamic, List scopes, T multiplier) { + this(type, name, category, defaultValue, description, isDynamic, scopes, multiplier, null, null, null, null, null, null); + } + public ConfigKey(Class type, String 
name, String category, String defaultValue, String description, boolean isDynamic, Scope scope, T multiplier, String parent) { this(type, name, category, defaultValue, description, isDynamic, scope, multiplier, null, parent, null, null, null, null); } @@ -159,13 +256,22 @@ public class ConfigKey { public ConfigKey(Class type, String name, String category, String defaultValue, String description, boolean isDynamic, Scope scope, T multiplier, String displayText, String parent, Ternary group, Pair subGroup, Kind kind, String options) { + this(type, name, category, defaultValue, description, isDynamic, scope == null ? null : List.of(scope), multiplier, + displayText, parent, group, subGroup, kind, options); + } + + public ConfigKey(Class type, String name, String category, String defaultValue, String description, boolean isDynamic, List scopes, T multiplier, + String displayText, String parent, Ternary group, Pair subGroup, Kind kind, String options) { _category = category; _type = type; _name = name; _defaultValue = defaultValue; _description = description; _displayText = displayText; - _scope = scope; + this.scopes = new ArrayList<>(); + if (scopes != null) { + this.scopes.addAll(scopes); + } _isDynamic = isDynamic; _multiplier = multiplier; _parent = parent; @@ -218,28 +324,45 @@ public class ConfigKey { String value = s_depot != null ? s_depot.getConfigStringValue(_name, Scope.Global, null) : null; _value = valueOf((value == null) ? defaultValue() : value); } - return _value; } - protected T valueInScope(Scope scope, Long id) { + protected T valueInGlobalOrAvailableParentScope(Scope scope, Long id) { + if (scopes.size() <= 1) { + return value(); + } + Pair s = new Pair<>(scope, id); + do { + s = s_depot != null ? 
s_depot.getParentScope(s.first(), s.second()) : null; + if (s != null && scopes.contains(s.first())) { + return valueInScope(s.first(), s.second()); + } + } while (s != null); + logger.trace("Global value for config ({}): {}", _name, _value); + return value(); + } + + public T valueInScope(Scope scope, Long id) { if (id == null) { return value(); } - String value = s_depot != null ? s_depot.getConfigStringValue(_name, scope, id) : null; if (value == null) { - return value(); + return valueInGlobalOrAvailableParentScope(scope, id); } + logger.trace("Scope({}) value for config ({}): {}", scope, _name, _value); return valueOf(value); } - public T valueIn(Long id) { - return valueInScope(_scope, id); + protected Scope getPrimaryScope() { + if (CollectionUtils.isNotEmpty(scopes)) { + return scopes.get(0); + } + return null; } - public T valueInDomain(Long domainId) { - return valueInScope(Scope.Domain, domainId); + public T valueIn(Long id) { + return valueInScope(getPrimaryScope(), id); } @SuppressWarnings("unchecked") @@ -277,4 +400,20 @@ public class ConfigKey { } } + public boolean isGlobalOrEmptyScope() { + return CollectionUtils.isEmpty(scopes) || + (scopes.size() == 1 && scopes.get(0) == Scope.Global); + } + + public int getScopeBitmask() { + int bitmask = 0; + if (CollectionUtils.isEmpty(scopes)) { + return bitmask; + } + for (Scope scope : scopes) { + bitmask |= scope.getBitValue(); + } + return bitmask; + } + } diff --git a/framework/config/src/main/java/org/apache/cloudstack/framework/config/ScopedConfigStorage.java b/framework/config/src/main/java/org/apache/cloudstack/framework/config/ScopedConfigStorage.java index 8126b9510a2..7a109456eb0 100644 --- a/framework/config/src/main/java/org/apache/cloudstack/framework/config/ScopedConfigStorage.java +++ b/framework/config/src/main/java/org/apache/cloudstack/framework/config/ScopedConfigStorage.java @@ -18,6 +18,8 @@ package org.apache.cloudstack.framework.config; import 
org.apache.cloudstack.framework.config.ConfigKey.Scope; +import com.cloud.utils.Pair; + /** * * This method is used by individual storage for configuration @@ -31,4 +33,7 @@ public interface ScopedConfigStorage { default String getConfigValue(long id, ConfigKey key) { return getConfigValue(id, key.key()); } + default Pair getParentScope(long id) { + return null; + } } diff --git a/framework/config/src/main/java/org/apache/cloudstack/framework/config/dao/ConfigurationDao.java b/framework/config/src/main/java/org/apache/cloudstack/framework/config/dao/ConfigurationDao.java index 88569558fc6..c464b12571c 100644 --- a/framework/config/src/main/java/org/apache/cloudstack/framework/config/dao/ConfigurationDao.java +++ b/framework/config/src/main/java/org/apache/cloudstack/framework/config/dao/ConfigurationDao.java @@ -16,6 +16,7 @@ // under the License. package org.apache.cloudstack.framework.config.dao; +import java.util.List; import java.util.Map; import org.apache.cloudstack.framework.config.impl.ConfigurationVO; @@ -67,4 +68,6 @@ public interface ConfigurationDao extends GenericDao { boolean update(String name, String category, String value); void invalidateCache(); + + List searchPartialConfigurations(); } diff --git a/framework/config/src/main/java/org/apache/cloudstack/framework/config/dao/ConfigurationDaoImpl.java b/framework/config/src/main/java/org/apache/cloudstack/framework/config/dao/ConfigurationDaoImpl.java index 7c4a6f9a609..5b941f8fccc 100644 --- a/framework/config/src/main/java/org/apache/cloudstack/framework/config/dao/ConfigurationDaoImpl.java +++ b/framework/config/src/main/java/org/apache/cloudstack/framework/config/dao/ConfigurationDaoImpl.java @@ -43,6 +43,7 @@ public class ConfigurationDaoImpl extends GenericDaoBase InstanceSearch; final SearchBuilder NameSearch; + final SearchBuilder PartialSearch; public static final String UPDATE_CONFIGURATION_SQL = "UPDATE configuration SET value = ? 
WHERE name = ?"; @@ -53,6 +54,11 @@ public class ConfigurationDaoImpl extends GenericDaoBase searchPartialConfigurations() { + SearchCriteria sc = PartialSearch.create(); + return searchIncludingRemoved(sc, null, null, false); + } } diff --git a/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java b/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java index b47370d9205..b1c3c5d9a27 100644 --- a/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java +++ b/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java @@ -23,7 +23,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.concurrent.TimeUnit; import javax.annotation.PostConstruct; import javax.inject.Inject; @@ -36,6 +35,7 @@ import org.apache.cloudstack.framework.config.ScopedConfigStorage; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.config.dao.ConfigurationGroupDao; import org.apache.cloudstack.framework.config.dao.ConfigurationSubGroupDao; +import org.apache.cloudstack.utils.cache.LazyCache; import org.apache.commons.lang.ObjectUtils; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; @@ -44,8 +44,6 @@ import org.apache.logging.log4j.Logger; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; import com.cloud.utils.exception.CloudRuntimeException; -import com.github.benmanes.caffeine.cache.Cache; -import com.github.benmanes.caffeine.cache.Caffeine; /** * ConfigDepotImpl implements the ConfigDepot and ConfigDepotAdmin interface. 
@@ -87,17 +85,15 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin { List _scopedStorages; Set _configured = Collections.synchronizedSet(new HashSet()); Set newConfigs = Collections.synchronizedSet(new HashSet<>()); - Cache configCache; + LazyCache configCache; private HashMap>> _allKeys = new HashMap>>(1007); HashMap>> _scopeLevelConfigsMap = new HashMap>>(); public ConfigDepotImpl() { - configCache = Caffeine.newBuilder() - .maximumSize(512) - .expireAfterWrite(CONFIG_CACHE_EXPIRE_SECONDS, TimeUnit.SECONDS) - .build(); + configCache = new LazyCache<>(512, + CONFIG_CACHE_EXPIRE_SECONDS, this::getConfigStringValueInternal); ConfigKey.init(this); createEmptyScopeLevelMappings(); } @@ -148,9 +144,11 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin { createOrupdateConfigObject(date, configurable.getConfigComponentName(), key, null); - if ((key.scope() != null) && (key.scope() != ConfigKey.Scope.Global)) { - Set> currentConfigs = _scopeLevelConfigsMap.get(key.scope()); - currentConfigs.add(key); + if (!key.isGlobalOrEmptyScope()) { + for (ConfigKey.Scope scope : key.getScopes()) { + Set> currentConfigs = _scopeLevelConfigsMap.get(scope); + currentConfigs.add(key); + } } } @@ -208,12 +206,12 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin { } else { boolean configUpdated = false; if (vo.isDynamic() != key.isDynamic() || !ObjectUtils.equals(vo.getDescription(), key.description()) || !ObjectUtils.equals(vo.getDefaultValue(), key.defaultValue()) || - !ObjectUtils.equals(vo.getScope(), key.scope().toString()) || + !ObjectUtils.equals(vo.getScope(), key.getScopeBitmask()) || !ObjectUtils.equals(vo.getComponent(), componentName)) { vo.setDynamic(key.isDynamic()); vo.setDescription(key.description()); vo.setDefaultValue(key.defaultValue()); - vo.setScope(key.scope().toString()); + vo.setScope(key.getScopeBitmask()); vo.setComponent(componentName); vo.setUpdated(date); configUpdated = true; @@ -287,12 
+285,7 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin { scopeId = Long.valueOf(parts[2]); } catch (IllegalArgumentException ignored) {} if (!ConfigKey.Scope.Global.equals(scope) && scopeId != null) { - ScopedConfigStorage scopedConfigStorage = null; - for (ScopedConfigStorage storage : _scopedStorages) { - if (storage.getScope() == scope) { - scopedConfigStorage = storage; - } - } + ScopedConfigStorage scopedConfigStorage = getScopedStorage(scope); if (scopedConfigStorage == null) { throw new CloudRuntimeException("Unable to find config storage for this scope: " + scope + " for " + key); } @@ -311,7 +304,7 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin { @Override public String getConfigStringValue(String key, ConfigKey.Scope scope, Long scopeId) { - return configCache.get(getConfigCacheKey(key, scope, scopeId), this::getConfigStringValueInternal); + return configCache.get(getConfigCacheKey(key, scope, scopeId)); } @Override @@ -319,26 +312,6 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin { configCache.invalidate(getConfigCacheKey(key, scope, scopeId)); } - public ScopedConfigStorage findScopedConfigStorage(ConfigKey config) { - for (ScopedConfigStorage storage : _scopedStorages) { - if (storage.getScope() == config.scope()) { - return storage; - } - } - - throw new CloudRuntimeException("Unable to find config storage for this scope: " + config.scope() + " for " + config.key()); - } - - public ScopedConfigStorage getDomainScope(ConfigKey config) { - for (ScopedConfigStorage storage : _scopedStorages) { - if (storage.getScope() == ConfigKey.Scope.Domain) { - return storage; - } - } - - throw new CloudRuntimeException("Unable to find config storage for this scope: " + ConfigKey.Scope.Domain + " for " + config.key()); - } - public List getScopedStorages() { return _scopedStorages; } @@ -402,4 +375,27 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin { public boolean 
isNewConfig(ConfigKey configKey) { return newConfigs.contains(configKey.key()); } + + protected ScopedConfigStorage getScopedStorage(ConfigKey.Scope scope) { + ScopedConfigStorage scopedConfigStorage = null; + for (ScopedConfigStorage storage : _scopedStorages) { + if (storage.getScope() == scope) { + scopedConfigStorage = storage; + break; + } + } + return scopedConfigStorage; + } + + @Override + public Pair getParentScope(ConfigKey.Scope scope, Long id) { + if (scope.getParent() == null) { + return null; + } + ScopedConfigStorage scopedConfigStorage = getScopedStorage(scope); + if (scopedConfigStorage == null) { + return null; + } + return scopedConfigStorage.getParentScope(id); + } } diff --git a/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigurationVO.java b/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigurationVO.java index c705cc64072..d12a41864b0 100644 --- a/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigurationVO.java +++ b/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigurationVO.java @@ -17,6 +17,7 @@ package org.apache.cloudstack.framework.config.impl; import java.util.Date; +import java.util.List; import javax.persistence.Column; import javax.persistence.Entity; @@ -60,7 +61,7 @@ public class ConfigurationVO implements Configuration { private boolean dynamic; @Column(name = "scope") - private String scope; + private Integer scope; @Column(name = "updated") @Temporal(value = TemporalType.TIMESTAMP) @@ -102,6 +103,7 @@ public class ConfigurationVO implements Configuration { this.name = name; this.description = description; this.parent = parentConfigName; + this.scope = 0; setValue(value); setDisplayText(displayText); setGroupId(groupId); @@ -112,7 +114,7 @@ public class ConfigurationVO implements Configuration { this(key.category(), "DEFAULT", component, key.key(), key.defaultValue(), key.description(), key.displayText(), 
key.parent()); defaultValue = key.defaultValue(); dynamic = key.isDynamic(); - scope = key.scope() != null ? key.scope().toString() : null; + scope = key.getScopeBitmask(); } @Override @@ -183,10 +185,15 @@ public class ConfigurationVO implements Configuration { } @Override - public String getScope() { + public int getScope() { return scope; } + @Override + public List getScopes() { + return ConfigKey.Scope.decode(scope); + } + @Override public boolean isDynamic() { return dynamic; @@ -205,7 +212,7 @@ public class ConfigurationVO implements Configuration { this.defaultValue = defaultValue; } - public void setScope(String scope) { + public void setScope(int scope) { this.scope = scope; } diff --git a/framework/config/src/test/java/org/apache/cloudstack/framework/config/ConfigKeyTest.java b/framework/config/src/test/java/org/apache/cloudstack/framework/config/ConfigKeyTest.java index a3a8aadfa60..50be7200d56 100644 --- a/framework/config/src/test/java/org/apache/cloudstack/framework/config/ConfigKeyTest.java +++ b/framework/config/src/test/java/org/apache/cloudstack/framework/config/ConfigKeyTest.java @@ -16,6 +16,8 @@ // under the License. 
package org.apache.cloudstack.framework.config; +import java.util.List; + import org.junit.Assert; import org.junit.Test; @@ -47,4 +49,31 @@ public class ConfigKeyTest { ConfigKey key = new ConfigKey("hond", Boolean.class, "naam", "truus", "thrown name", false); Assert.assertFalse("zero and 0L should be considered the same address", key.isSameKeyAs(0L)); } + + @Test + public void testDecode() { + ConfigKey key = new ConfigKey("testcategoey", Boolean.class, "test", "true", "test descriptuin", false, List.of(Scope.Zone, Scope.StoragePool)); + int bitmask = key.getScopeBitmask(); + List scopes = ConfigKey.Scope.decode(bitmask); + Assert.assertEquals(bitmask, ConfigKey.Scope.getBitmask(scopes.toArray(new Scope[0]))); + for (Scope scope : scopes) { + Assert.assertTrue(scope == Scope.Zone || scope == Scope.StoragePool); + } + } + + @Test + public void testDecodeAsCsv() { + ConfigKey key = new ConfigKey("testcategoey", Boolean.class, "test", "true", "test descriptuin", false, List.of(Scope.Zone, Scope.StoragePool)); + int bitmask = key.getScopeBitmask(); + String scopes = ConfigKey.Scope.decodeAsCsv(bitmask); + Assert.assertTrue("Zone, StoragePool".equals(scopes)); + } + + @Test + public void testGetDescendants() { + List descendants = ConfigKey.Scope.getAllDescendants(Scope.Zone.name()); + for (Scope descendant : descendants) { + Assert.assertTrue(descendant == Scope.Cluster || descendant == Scope.StoragePool || descendant == Scope.ImageStore); + } + } } diff --git a/framework/config/src/test/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImplTest.java b/framework/config/src/test/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImplTest.java index 8a7da795345..ed752165aeb 100644 --- a/framework/config/src/test/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImplTest.java +++ b/framework/config/src/test/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImplTest.java @@ -20,10 +20,14 @@ package 
org.apache.cloudstack.framework.config.impl; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Set; import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; +import org.apache.cloudstack.framework.config.ScopedConfigStorage; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.config.dao.ConfigurationSubGroupDao; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; @@ -33,11 +37,15 @@ import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; import org.springframework.test.util.ReflectionTestUtils; +import com.cloud.utils.Pair; + @RunWith(MockitoJUnitRunner.class) public class ConfigDepotImplTest { @Mock ConfigurationDao _configDao; + @Mock + ConfigurationSubGroupDao configSubGroupDao; @InjectMocks private ConfigDepotImpl configDepotImpl = new ConfigDepotImpl(); @@ -107,4 +115,76 @@ public class ConfigDepotImplTest { runTestGetConfigStringValueExpiry(((ConfigDepotImpl.CONFIG_CACHE_EXPIRE_SECONDS) + 5) * 1000, 2); } + + @Test + public void testPopulateConfigurationNewVO() { + ConfigKey StorageDisableThreshold = new ConfigKey<>(ConfigKey.CATEGORY_ALERT, Double.class, "pool.storage.capacity.disablethreshold", "0.85", + "Percentage (as a value between 0 and 1) of storage utilization above which allocators will disable using the pool for low storage available.", + true, List.of(ConfigKey.Scope.StoragePool, ConfigKey.Scope.Zone)); + Configurable configurable = new Configurable() { + @Override + public String getConfigComponentName() { + return "test"; + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[] { StorageDisableThreshold }; + } + }; + configDepotImpl.setConfigurables(List.of(configurable)); + configDepotImpl.populateConfigurations(); + + Assert.assertEquals("pool.storage.capacity.disablethreshold", + 
configDepotImpl._scopeLevelConfigsMap.get(ConfigKey.Scope.Zone).iterator().next().key()); + Assert.assertEquals("pool.storage.capacity.disablethreshold", + configDepotImpl._scopeLevelConfigsMap.get(ConfigKey.Scope.StoragePool).iterator().next().key()); + Assert.assertEquals(0, configDepotImpl._scopeLevelConfigsMap.get(ConfigKey.Scope.Cluster).size()); + } + + @Test + public void testPopulateConfiguration() { + ConfigKey StorageDisableThreshold = new ConfigKey<>(ConfigKey.CATEGORY_ALERT, Double.class, "pool.storage.capacity.disablethreshold", "0.85", + "Percentage (as a value between 0 and 1) of storage utilization above which allocators will disable using the pool for low storage available.", + true, List.of(ConfigKey.Scope.StoragePool, ConfigKey.Scope.Zone)); + Configurable configurable = new Configurable() { + @Override + public String getConfigComponentName() { + return "test"; + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[]{StorageDisableThreshold}; + } + }; + configDepotImpl.setConfigurables(List.of(configurable)); + + ConfigurationVO configurationVO = new ConfigurationVO(StorageDisableThreshold.category(), "DEFAULT", "component", + StorageDisableThreshold.key(), StorageDisableThreshold.defaultValue(), StorageDisableThreshold.description(), + StorageDisableThreshold.displayText(), StorageDisableThreshold.parent(), 1L, 10L); + Mockito.when(_configDao.findById("pool.storage.capacity.disablethreshold")).thenReturn(configurationVO); + configDepotImpl.populateConfigurations(); + + Mockito.verify(_configDao, Mockito.times(1)).persist(configurationVO); + } + + @Test + public void getParentScopeWithValidScope() { + ConfigKey.Scope scope = ConfigKey.Scope.Cluster; + ScopedConfigStorage scopedConfigStorage = Mockito.mock(ScopedConfigStorage.class); + Long id = 1L; + ConfigKey.Scope parentScope = ConfigKey.Scope.Zone; + Long parentId = 2L; + + Mockito.when(scopedConfigStorage.getScope()).thenReturn(scope); + 
Mockito.when(scopedConfigStorage.getParentScope(id)).thenReturn(new Pair<>(parentScope, parentId)); + + configDepotImpl.setScopedStorages(Collections.singletonList(scopedConfigStorage)); + Pair result = configDepotImpl.getParentScope(scope, id); + + Assert.assertNotNull(result); + Assert.assertEquals(parentScope, result.first()); + Assert.assertEquals(parentId, result.second()); + } } diff --git a/framework/db/src/main/java/com/cloud/utils/db/GenericDao.java b/framework/db/src/main/java/com/cloud/utils/db/GenericDao.java index de8838b0999..44c312ea9d8 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/GenericDao.java +++ b/framework/db/src/main/java/com/cloud/utils/db/GenericDao.java @@ -148,6 +148,11 @@ public interface GenericDao { */ List listAll(Filter filter); + /** + * Look IDs for all active rows. + */ + List listAllIds(); + /** * Search for the entity beans * @param sc diff --git a/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java b/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java index c7f2daadc51..bf6fb03563f 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java +++ b/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java @@ -1218,6 +1218,35 @@ public abstract class GenericDaoBase extends Compone return executeList(sql.toString()); } + private Object getIdObject() { + T entity = (T)_searchEnhancer.create(); + try { + Method m = _entityBeanType.getMethod("getId"); + return m.invoke(entity); + } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException ignored) { + logger.warn("Unable to get ID object for entity: {}", _entityBeanType.getSimpleName()); + } + return null; + } + + @Override + public List listAllIds() { + Object idObj = getIdObject(); + if (idObj == null) { + return Collections.emptyList(); + } + Class clazz = (Class)idObj.getClass(); + GenericSearchBuilder sb = createSearchBuilder(clazz); + try { + Method m = 
sb.entity().getClass().getMethod("getId"); + sb.selectFields(m.invoke(sb.entity())); + } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException ignored) { + return Collections.emptyList(); + } + sb.done(); + return customSearch(sb.create(), null); + } + @Override public boolean expunge(final ID id) { final TransactionLegacy txn = TransactionLegacy.currentTxn(); @@ -2445,4 +2474,11 @@ public abstract class GenericDaoBase extends Compone } } + public static class SumCount { + public long sum; + public long count; + + public SumCount() { + } + } } diff --git a/framework/db/src/main/java/com/cloud/utils/db/SearchBase.java b/framework/db/src/main/java/com/cloud/utils/db/SearchBase.java index fcc9ded684d..f2d08aa876e 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/SearchBase.java +++ b/framework/db/src/main/java/com/cloud/utils/db/SearchBase.java @@ -484,6 +484,9 @@ public abstract class SearchBase, T, K> { tableAlias = attr.table; } } + if (op == Op.BINARY_OR) { + sql.append("("); + } sql.append(tableAlias).append(".").append(attr.columnName).append(op.toString()); if (op == Op.IN && params.length == 1) { diff --git a/framework/db/src/main/java/com/cloud/utils/db/SearchCriteria.java b/framework/db/src/main/java/com/cloud/utils/db/SearchCriteria.java index 8affbd5300a..caf88fadb9f 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/SearchCriteria.java +++ b/framework/db/src/main/java/com/cloud/utils/db/SearchCriteria.java @@ -38,7 +38,7 @@ public class SearchCriteria { " NOT BETWEEN ? AND ? ", 2), IN(" IN () ", -1), NOTIN(" NOT IN () ", -1), LIKE(" LIKE ? ", 1), NLIKE(" NOT LIKE ? ", 1), NIN(" NOT IN () ", -1), NULL(" IS NULL ", 0), NNULL( " IS NOT NULL ", - 0), SC(" () ", 1), TEXT(" () ", 1), RP("", 0), AND(" AND ", 0), OR(" OR ", 0), NOT(" NOT ", 0), FIND_IN_SET(" ) ", 1); + 0), SC(" () ", 1), TEXT(" () ", 1), RP("", 0), AND(" AND ", 0), OR(" OR ", 0), NOT(" NOT ", 0), FIND_IN_SET(" ) ", 1), BINARY_OR(" & ?) 
> 0", 1); private final String op; int params; diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDao.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDao.java index b3bfda0334c..79ec3f2b087 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDao.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDao.java @@ -40,4 +40,5 @@ public interface VmWorkJobDao extends GenericDao { void expungeLeftoverWorkJobs(long msid); int expungeByVmList(List vmIds, Long batchSize); + List listVmIdsWithPendingJob(); } diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java index 3b167498a37..a467b5fdf59 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java @@ -24,6 +24,7 @@ import java.util.List; import javax.annotation.PostConstruct; import javax.inject.Inject; +import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO; import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO.Step; import org.apache.cloudstack.jobs.JobInfo; @@ -32,6 +33,8 @@ import org.apache.commons.collections.CollectionUtils; import com.cloud.utils.DateUtil; import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; @@ -224,4 +227,17 @@ public class VmWorkJobDaoImpl extends GenericDaoBase implemen sc.setParameters("vmIds", vmIds.toArray()); return batchExpunge(sc, batchSize); } + 
+ @Override + public List listVmIdsWithPendingJob() { + GenericSearchBuilder sb = createSearchBuilder(Long.class); + SearchBuilder asyncJobSearch = _baseJobDao.createSearchBuilder(); + asyncJobSearch.and("status", asyncJobSearch.entity().getStatus(), SearchCriteria.Op.EQ); + sb.join("asyncJobSearch", asyncJobSearch, sb.entity().getId(), asyncJobSearch.entity().getId(), JoinBuilder.JoinType.INNER); + sb.and("removed", sb.entity().getRemoved(), Op.NULL); + sb.selectFields(sb.entity().getVmInstanceId()); + SearchCriteria sc = sb.create(); + sc.setJoinParameters("asyncJobSearch", "status", JobInfo.Status.IN_PROGRESS); + return customSearch(sc, null); + } } diff --git a/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImplTest.java b/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImplTest.java index 3e2bc15b1e0..a70a96b1a14 100644 --- a/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImplTest.java +++ b/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImplTest.java @@ -16,27 +16,69 @@ // under the License. 
package org.apache.cloudstack.framework.jobs.dao; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyLong; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.isNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.List; +import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO; +import org.apache.cloudstack.jobs.JobInfo; import org.junit.Assert; +import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.Mockito; +import org.mockito.InjectMocks; +import org.mockito.Mock; import org.mockito.Spy; import org.mockito.junit.MockitoJUnitRunner; import org.mockito.stubbing.Answer; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @RunWith(MockitoJUnitRunner.class) public class VmWorkJobDaoImplTest { + @Mock + AsyncJobDao asyncJobDao; @Spy + @InjectMocks VmWorkJobDaoImpl vmWorkJobDaoImpl; + private GenericSearchBuilder genericVmWorkJobSearchBuilder; + private SearchBuilder asyncJobSearchBuilder; + private SearchCriteria searchCriteria; + + @Before + public void setUp() { + genericVmWorkJobSearchBuilder = mock(GenericSearchBuilder.class); + VmWorkJobVO entityVO = mock(VmWorkJobVO.class); + when(genericVmWorkJobSearchBuilder.entity()).thenReturn(entityVO); + asyncJobSearchBuilder = mock(SearchBuilder.class); + AsyncJobVO asyncJobVO = mock(AsyncJobVO.class); + 
when(asyncJobSearchBuilder.entity()).thenReturn(asyncJobVO); + searchCriteria = mock(SearchCriteria.class); + when(vmWorkJobDaoImpl.createSearchBuilder(Long.class)).thenReturn(genericVmWorkJobSearchBuilder); + when(asyncJobDao.createSearchBuilder()).thenReturn(asyncJobSearchBuilder); + when(genericVmWorkJobSearchBuilder.create()).thenReturn(searchCriteria); + } + @Test public void testExpungeByVmListNoVms() { Assert.assertEquals(0, vmWorkJobDaoImpl.expungeByVmList( @@ -47,22 +89,52 @@ public class VmWorkJobDaoImplTest { @Test public void testExpungeByVmList() { - SearchBuilder sb = Mockito.mock(SearchBuilder.class); - SearchCriteria sc = Mockito.mock(SearchCriteria.class); - Mockito.when(sb.create()).thenReturn(sc); - Mockito.doAnswer((Answer) invocationOnMock -> { + SearchBuilder sb = mock(SearchBuilder.class); + SearchCriteria sc = mock(SearchCriteria.class); + when(sb.create()).thenReturn(sc); + doAnswer((Answer) invocationOnMock -> { Long batchSize = (Long)invocationOnMock.getArguments()[1]; return batchSize == null ? 
0 : batchSize.intValue(); - }).when(vmWorkJobDaoImpl).batchExpunge(Mockito.any(SearchCriteria.class), Mockito.anyLong()); - Mockito.when(vmWorkJobDaoImpl.createSearchBuilder()).thenReturn(sb); - final VmWorkJobVO mockedVO = Mockito.mock(VmWorkJobVO.class); - Mockito.when(sb.entity()).thenReturn(mockedVO); + }).when(vmWorkJobDaoImpl).batchExpunge(any(SearchCriteria.class), anyLong()); + when(vmWorkJobDaoImpl.createSearchBuilder()).thenReturn(sb); + final VmWorkJobVO mockedVO = mock(VmWorkJobVO.class); + when(sb.entity()).thenReturn(mockedVO); List vmIds = List.of(1L, 2L); Object[] array = vmIds.toArray(); Long batchSize = 50L; Assert.assertEquals(batchSize.intValue(), vmWorkJobDaoImpl.expungeByVmList(List.of(1L, 2L), batchSize)); - Mockito.verify(sc).setParameters("vmIds", array); - Mockito.verify(vmWorkJobDaoImpl, Mockito.times(1)) + verify(sc).setParameters("vmIds", array); + verify(vmWorkJobDaoImpl, times(1)) .batchExpunge(sc, batchSize); } + + @Test + public void testListVmIdsWithPendingJob() { + List mockVmIds = Arrays.asList(101L, 102L, 103L); + doReturn(mockVmIds).when(vmWorkJobDaoImpl).customSearch(any(SearchCriteria.class), isNull()); + List result = vmWorkJobDaoImpl.listVmIdsWithPendingJob(); + verify(genericVmWorkJobSearchBuilder).join(eq("asyncJobSearch"), eq(asyncJobSearchBuilder), any(), any(), eq(JoinBuilder.JoinType.INNER)); + verify(genericVmWorkJobSearchBuilder).and(eq("removed"), any(), eq(SearchCriteria.Op.NULL)); + verify(genericVmWorkJobSearchBuilder).create(); + verify(asyncJobSearchBuilder).and(eq("status"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchCriteria).setJoinParameters(eq("asyncJobSearch"), eq("status"), eq(JobInfo.Status.IN_PROGRESS)); + verify(vmWorkJobDaoImpl).customSearch(searchCriteria, null); + assertEquals(3, result.size()); + assertEquals(Long.valueOf(101L), result.get(0)); + assertEquals(Long.valueOf(102L), result.get(1)); + assertEquals(Long.valueOf(103L), result.get(2)); + } + + @Test + public void 
testListVmIdsWithPendingJobEmptyResult() { + doReturn(Collections.emptyList()).when(vmWorkJobDaoImpl).customSearch(any(SearchCriteria.class), isNull()); + List result = vmWorkJobDaoImpl.listVmIdsWithPendingJob(); + verify(genericVmWorkJobSearchBuilder).join(eq("asyncJobSearch"), eq(asyncJobSearchBuilder), any(), any(), eq(JoinBuilder.JoinType.INNER)); + verify(genericVmWorkJobSearchBuilder).and(eq("removed"), any(), eq(SearchCriteria.Op.NULL)); + verify(genericVmWorkJobSearchBuilder).create(); + verify(asyncJobSearchBuilder).and(eq("status"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchCriteria).setJoinParameters(eq("asyncJobSearch"), eq("status"), eq(JobInfo.Status.IN_PROGRESS)); + verify(vmWorkJobDaoImpl).customSearch(searchCriteria, null); + assertTrue(result.isEmpty()); + } } diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaManagerImpl.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaManagerImpl.java index c9254814f46..a03f82a4358 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaManagerImpl.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaManagerImpl.java @@ -34,6 +34,7 @@ import javax.naming.ConfigurationException; import com.cloud.user.Account; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.quota.activationrule.presetvariables.Configuration; import org.apache.cloudstack.quota.activationrule.presetvariables.GenericPresetVariable; import org.apache.cloudstack.quota.activationrule.presetvariables.PresetVariableHelper; import org.apache.cloudstack.quota.activationrule.presetvariables.PresetVariables; @@ -467,6 +468,11 @@ public class QuotaManagerImpl extends ManagerBase implements QuotaManager { } + Configuration configuration = presetVariables.getConfiguration(); + if (configuration != null) { + jsInterpreter.injectVariable("configuration", configuration.toString()); + } + 
jsInterpreter.injectStringVariable("resourceType", presetVariables.getResourceType()); jsInterpreter.injectVariable("value", presetVariables.getValue().toString()); jsInterpreter.injectVariable("zone", presetVariables.getZone().toString()); diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/ComputeOffering.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/ComputeOffering.java index 1d294276d47..09182711ca8 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/ComputeOffering.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/ComputeOffering.java @@ -17,10 +17,15 @@ package org.apache.cloudstack.quota.activationrule.presetvariables; +import org.apache.cloudstack.quota.constant.QuotaTypes; + public class ComputeOffering extends GenericPresetVariable { @PresetVariableDefinition(description = "A boolean informing if the compute offering is customized or not.") private boolean customized; + @PresetVariableDefinition(description = "A boolean informing if the compute offering offers HA or not.", supportedTypes = {QuotaTypes.RUNNING_VM}) + private boolean offerHa; + public boolean isCustomized() { return customized; } @@ -30,4 +35,13 @@ public class ComputeOffering extends GenericPresetVariable { fieldNamesToIncludeInToString.add("customized"); } + public boolean offerHa() { + return offerHa; + } + + public void setOfferHa(boolean offerHa) { + this.offerHa = offerHa; + fieldNamesToIncludeInToString.add("offerHa"); + } + } diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Configuration.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Configuration.java new file mode 100644 index 00000000000..e59f78af8d9 --- /dev/null +++ 
b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Configuration.java @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.quota.activationrule.presetvariables; + +import org.apache.cloudstack.quota.constant.QuotaTypes; + +public class Configuration extends GenericPresetVariable{ + + @PresetVariableDefinition(description = "A boolean informing if the cluster configuration force.ha is enabled or not.", supportedTypes = {QuotaTypes.RUNNING_VM}) + private boolean forceHa; + + public boolean getForceHa() { + return forceHa; + } + + public void setForceHa(boolean forceHa) { + this.forceHa = forceHa; + fieldNamesToIncludeInToString.add("forceHa"); + } +} diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/DiskOfferingPresetVariables.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/DiskOfferingPresetVariables.java new file mode 100644 index 00000000000..b2f5f69502f --- /dev/null +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/DiskOfferingPresetVariables.java @@ -0,0 +1,165 @@ +// Licensed to the 
Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.quota.activationrule.presetvariables; + +public class DiskOfferingPresetVariables extends GenericPresetVariable { + + @PresetVariableDefinition(description = "A long informing the bytes read rate of the disk offering.") + private Long bytesReadRate; + + @PresetVariableDefinition(description = "A long informing the burst bytes read rate of the disk offering.") + private Long bytesReadBurst; + + @PresetVariableDefinition(description = "The length (in seconds) of the bytes read burst.") + private Long bytesReadBurstLength; + + @PresetVariableDefinition(description = "A long informing the bytes write rate of the disk offering.") + private Long bytesWriteRate; + + @PresetVariableDefinition(description = "A long informing the burst bytes write rate of the disk offering.") + private Long bytesWriteBurst; + + @PresetVariableDefinition(description = "The length (in seconds) of the bytes write burst.") + private Long bytesWriteBurstLength; + + @PresetVariableDefinition(description = "A long informing the I/O requests read rate of the disk offering.") + private Long iopsReadRate; + + @PresetVariableDefinition(description = "A long informing the burst I/O requests read 
rate of the disk offering.") + private Long iopsReadBurst; + + @PresetVariableDefinition(description = "The length (in seconds) of the IOPS read burst.") + private Long iopsReadBurstLength; + + @PresetVariableDefinition(description = "A long informing the I/O requests write rate of the disk offering.") + private Long iopsWriteRate; + + @PresetVariableDefinition(description = "A long informing the burst I/O requests write rate of the disk offering.") + private Long iopsWriteBurst; + + @PresetVariableDefinition(description = "The length (in seconds) of the IOPS write burst.") + private Long iopsWriteBurstLength; + + public Long getBytesReadRate() { + return bytesReadRate; + } + + public void setBytesReadRate(Long bytesReadRate) { + this.bytesReadRate = bytesReadRate; + fieldNamesToIncludeInToString.add("bytesReadRate"); + } + + public Long getBytesReadBurst() { + return bytesReadBurst; + } + + public void setBytesReadBurst(Long bytesReadBurst) { + this.bytesReadBurst = bytesReadBurst; + fieldNamesToIncludeInToString.add("bytesReadBurst"); + } + + public Long getBytesReadBurstLength() { + return bytesReadBurstLength; + } + + public void setBytesReadBurstLength(Long bytesReadBurstLength) { + this.bytesReadBurstLength = bytesReadBurstLength; + fieldNamesToIncludeInToString.add("bytesReadBurstLength"); + } + + public Long getBytesWriteRate() { + return bytesWriteRate; + } + + public void setBytesWriteRate(Long bytesWriteRate) { + this.bytesWriteRate = bytesWriteRate; + fieldNamesToIncludeInToString.add("bytesWriteRate"); + } + + public Long getBytesWriteBurst() { + return bytesWriteBurst; + } + + public void setBytesWriteBurst(Long bytesWriteBurst) { + this.bytesWriteBurst = bytesWriteBurst; + fieldNamesToIncludeInToString.add("bytesWriteBurst"); + } + + public Long getBytesWriteBurstLength() { + return bytesWriteBurstLength; + } + + public void setBytesWriteBurstLength(Long bytesWriteBurstLength) { + this.bytesWriteBurstLength = bytesWriteBurstLength; + 
fieldNamesToIncludeInToString.add("bytesWriteBurstLength"); + } + + public Long getIopsReadRate() { + return iopsReadRate; + } + + public void setIopsReadRate(Long iopsReadRate) { + this.iopsReadRate = iopsReadRate; + fieldNamesToIncludeInToString.add("iopsReadRate"); + } + + public Long getIopsReadBurst() { + return iopsReadBurst; + } + + public void setIopsReadBurst(Long iopsReadBurst) { + this.iopsReadBurst = iopsReadBurst; + fieldNamesToIncludeInToString.add("iopsReadBurst"); + } + + public Long getIopsReadBurstLength() { + return iopsReadBurstLength; + } + + public void setIopsReadBurstLength(Long iopsReadBurstLength) { + this.iopsReadBurstLength = iopsReadBurstLength; + fieldNamesToIncludeInToString.add("iopsReadBurstLength"); + } + + public Long getIopsWriteRate() { + return iopsWriteRate; + } + + public void setIopsWriteRate(Long iopsWriteRate) { + this.iopsWriteRate = iopsWriteRate; + fieldNamesToIncludeInToString.add("iopsWriteRate"); + } + + public Long getIopsWriteBurst() { + return iopsWriteBurst; + } + + public void setIopsWriteBurst(Long iopsWriteBurst) { + this.iopsWriteBurst = iopsWriteBurst; + fieldNamesToIncludeInToString.add("iopsWriteBurst"); + } + + public Long getIopsWriteBurstLength() { + return iopsWriteBurstLength; + } + + public void setIopsWriteBurstLength(Long iopsWriteBurstLength) { + this.iopsWriteBurstLength = iopsWriteBurstLength; + fieldNamesToIncludeInToString.add("iopsWriteBurstLength"); + } +} diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableHelper.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableHelper.java index 1e84ba27e02..05b75f4f64d 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableHelper.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableHelper.java @@ -25,6 +25,8 @@ 
import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.ClusterDetailsVO; import com.cloud.host.HostTagVO; import com.cloud.network.dao.NetworkVO; import com.cloud.network.vpc.VpcVO; @@ -37,6 +39,7 @@ import org.apache.cloudstack.acl.dao.RoleDao; import org.apache.cloudstack.backup.BackupOfferingVO; import org.apache.cloudstack.backup.dao.BackupOfferingDao; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.quota.constant.QuotaTypes; import org.apache.cloudstack.quota.dao.NetworkDao; import org.apache.cloudstack.quota.dao.VmTemplateDao; @@ -51,6 +54,7 @@ import org.apache.cloudstack.usage.UsageTypes; import org.apache.cloudstack.utils.bytescale.ByteScaleUtils; import org.apache.cloudstack.utils.jsinterpreter.JsInterpreter; import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.ObjectUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; @@ -181,6 +185,11 @@ public class PresetVariableHelper { @Inject VpcDao vpcDao; + @Inject + ConfigurationDao configDao; + + @Inject + ClusterDetailsDao clusterDetailsDao; protected boolean backupSnapshotAfterTakingSnapshot = SnapshotInfo.BackupSnapshotAfterTakingSnapshot.value(); @@ -194,6 +203,7 @@ public class PresetVariableHelper { presetVariables.setAccount(getPresetVariableAccount(usageRecord.getAccountId())); setPresetVariableProject(presetVariables); + setPresetVariableConfiguration(presetVariables, usageRecord); presetVariables.setDomain(getPresetVariableDomain(usageRecord.getDomainId())); presetVariables.setResourceType(usageRecord.getType()); @@ -272,6 +282,39 @@ public class PresetVariableHelper { return zone; } + protected void setPresetVariableConfiguration(PresetVariables presetVariables, UsageVO 
usageRecord) { + if (usageRecord.getUsageType() != UsageTypes.RUNNING_VM) { + return; + } + + Configuration configuration = new Configuration(); + setForceHaInConfiguration(configuration, usageRecord); + + presetVariables.setConfiguration(configuration); + } + + protected void setForceHaInConfiguration(Configuration configuration, UsageVO usageRecord) { + Long vmId = usageRecord.getUsageId(); + VMInstanceVO vmVo = vmInstanceDao.findByIdIncludingRemoved(vmId); + validateIfObjectIsNull(vmVo, vmId, "VM"); + + Long hostId = ObjectUtils.defaultIfNull(vmVo.getHostId(), vmVo.getLastHostId()); + + HostVO hostVo = hostDao.findByIdIncludingRemoved(hostId); + validateIfObjectIsNull(hostVo, hostId, "host"); + ClusterDetailsVO forceHa = clusterDetailsDao.findDetail(hostVo.getClusterId(), "force.ha"); + + String forceHaValue; + + if (forceHa != null) { + forceHaValue = forceHa.getValue(); + } else { + forceHaValue = configDao.getValue("force.ha"); + } + + configuration.setForceHa((Boolean.parseBoolean(forceHaValue))); + } + protected Value getPresetVariableValue(UsageVO usageRecord) { Long accountId = usageRecord.getAccountId(); int usageType = usageRecord.getUsageType(); @@ -390,12 +433,16 @@ public class PresetVariableHelper { return guestOsVo.getDisplayName(); } - protected ComputeOffering getPresetVariableValueComputeOffering(ServiceOfferingVO serviceOfferingVo) { + protected ComputeOffering getPresetVariableValueComputeOffering(ServiceOfferingVO serviceOfferingVo, int usageType) { ComputeOffering computeOffering = new ComputeOffering(); computeOffering.setId(serviceOfferingVo.getUuid()); computeOffering.setName(serviceOfferingVo.getName()); computeOffering.setCustomized(serviceOfferingVo.isDynamic()); + if (usageType == UsageTypes.RUNNING_VM) { + computeOffering.setOfferHa(serviceOfferingVo.isOfferHA()); + } + return computeOffering; } @@ -404,7 +451,7 @@ public class PresetVariableHelper { long computeOfferingId = vmVo.getServiceOfferingId(); ServiceOfferingVO 
serviceOfferingVo = serviceOfferingDao.findByIdIncludingRemoved(computeOfferingId); validateIfObjectIsNull(serviceOfferingVo, computeOfferingId, "compute offering"); - value.setComputeOffering(getPresetVariableValueComputeOffering(serviceOfferingVo)); + value.setComputeOffering(getPresetVariableValueComputeOffering(serviceOfferingVo, usageType)); if (usageType == UsageTypes.RUNNING_VM) { value.setComputingResources(getPresetVariableValueComputingResource(vmVo, serviceOfferingVo)); @@ -492,6 +539,7 @@ public class PresetVariableHelper { value.setId(volumeVo.getUuid()); value.setName(volumeVo.getName()); value.setProvisioningType(volumeVo.getProvisioningType()); + value.setVolumeType(volumeVo.getVolumeType()); Long poolId = volumeVo.getPoolId(); if (poolId == null) { @@ -510,13 +558,25 @@ public class PresetVariableHelper { } } - protected GenericPresetVariable getPresetVariableValueDiskOffering(Long diskOfferingId) { + protected DiskOfferingPresetVariables getPresetVariableValueDiskOffering(Long diskOfferingId) { DiskOfferingVO diskOfferingVo = diskOfferingDao.findByIdIncludingRemoved(diskOfferingId); validateIfObjectIsNull(diskOfferingVo, diskOfferingId, "disk offering"); - GenericPresetVariable diskOffering = new GenericPresetVariable(); + DiskOfferingPresetVariables diskOffering = new DiskOfferingPresetVariables(); diskOffering.setId(diskOfferingVo.getUuid()); diskOffering.setName(diskOfferingVo.getName()); + diskOffering.setBytesReadRate(diskOfferingVo.getBytesReadRate()); + diskOffering.setBytesReadBurst(diskOfferingVo.getBytesReadRateMax()); + diskOffering.setBytesReadBurstLength(diskOfferingVo.getBytesReadRateMaxLength()); + diskOffering.setBytesWriteRate(diskOfferingVo.getBytesWriteRate()); + diskOffering.setBytesWriteBurst(diskOfferingVo.getBytesWriteRateMax()); + diskOffering.setBytesWriteBurstLength(diskOfferingVo.getBytesWriteRateMaxLength()); + diskOffering.setIopsReadRate(diskOfferingVo.getIopsReadRate()); + 
diskOffering.setIopsReadBurst(diskOfferingVo.getIopsReadRateMax()); + diskOffering.setIopsReadBurstLength(diskOfferingVo.getIopsReadRateMaxLength()); + diskOffering.setIopsWriteRate(diskOfferingVo.getIopsWriteRate()); + diskOffering.setIopsWriteBurst(diskOfferingVo.getIopsWriteRateMax()); + diskOffering.setIopsWriteBurstLength(diskOfferingVo.getIopsWriteRateMaxLength()); return diskOffering; } diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariables.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariables.java index 6dab6604e91..1f8b88ca4cd 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariables.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariables.java @@ -39,6 +39,9 @@ public class PresetVariables { @PresetVariableDefinition(description = "Zone where the resource is.") private GenericPresetVariable zone; + @PresetVariableDefinition(description = "Configurations of the resource.") + private Configuration configuration; + @PresetVariableDefinition(description = "A list containing the tariffs ordered by the field 'position'.") private List lastTariffs; @@ -90,6 +93,14 @@ public class PresetVariables { this.zone = zone; } + public Configuration getConfiguration() { + return configuration; + } + + public void setConfiguration(Configuration configuration) { + this.configuration = configuration; + } + public List getLastTariffs() { return lastTariffs; } diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Value.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Value.java index d87146d8798..77e539db0f3 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Value.java +++ 
b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Value.java @@ -22,6 +22,7 @@ import java.util.Map; import com.cloud.storage.Snapshot; import com.cloud.storage.Storage.ProvisioningType; +import com.cloud.storage.Volume; import com.cloud.vm.snapshot.VMSnapshot; import org.apache.cloudstack.quota.constant.QuotaTypes; @@ -75,7 +76,7 @@ public class Value extends GenericPresetVariable { private GenericPresetVariable template; @PresetVariableDefinition(description = "Disk offering of the volume.", supportedTypes = {QuotaTypes.VOLUME}) - private GenericPresetVariable diskOffering; + private DiskOfferingPresetVariables diskOffering; @PresetVariableDefinition(description = "Storage where the volume or snapshot is. While handling with snapshots, this value can be from the primary storage if the global " + "setting 'snapshot.backup.to.secondary' is false, otherwise it will be from secondary storage.", supportedTypes = {QuotaTypes.VOLUME, QuotaTypes.SNAPSHOT}) @@ -93,6 +94,10 @@ public class Value extends GenericPresetVariable { @PresetVariableDefinition(description = "The volume format. Values can be: RAW, VHD, VHDX, OVA and QCOW2.", supportedTypes = {QuotaTypes.VOLUME, QuotaTypes.VOLUME_SECONDARY}) private String volumeFormat; + + @PresetVariableDefinition(description = "The volume type. 
Values can be: UNKNOWN, ROOT, SWAP, DATADISK and ISO.", supportedTypes = {QuotaTypes.VOLUME}) + private Volume.Type volumeType; + private String state; public Host getHost() { @@ -194,11 +199,11 @@ public class Value extends GenericPresetVariable { fieldNamesToIncludeInToString.add("template"); } - public GenericPresetVariable getDiskOffering() { + public DiskOfferingPresetVariables getDiskOffering() { return diskOffering; } - public void setDiskOffering(GenericPresetVariable diskOffering) { + public void setDiskOffering(DiskOfferingPresetVariables diskOffering) { this.diskOffering = diskOffering; fieldNamesToIncludeInToString.add("diskOffering"); } @@ -257,6 +262,15 @@ public class Value extends GenericPresetVariable { return volumeFormat; } + public Volume.Type getVolumeType() { + return volumeType; + } + + public void setVolumeType(Volume.Type volumeType) { + this.volumeType = volumeType; + fieldNamesToIncludeInToString.add("volumeType"); + } + public String getState() { return state; } diff --git a/framework/quota/src/test/java/org/apache/cloudstack/quota/QuotaManagerImplTest.java b/framework/quota/src/test/java/org/apache/cloudstack/quota/QuotaManagerImplTest.java index 5dfc12f7ef8..c62f80d4a44 100644 --- a/framework/quota/src/test/java/org/apache/cloudstack/quota/QuotaManagerImplTest.java +++ b/framework/quota/src/test/java/org/apache/cloudstack/quota/QuotaManagerImplTest.java @@ -270,6 +270,7 @@ public class QuotaManagerImplTest { Mockito.verify(jsInterpreterMock).injectVariable(Mockito.eq("account"), Mockito.anyString()); Mockito.verify(jsInterpreterMock).injectVariable(Mockito.eq("domain"), Mockito.anyString()); Mockito.verify(jsInterpreterMock, Mockito.never()).injectVariable(Mockito.eq("project"), Mockito.anyString()); + Mockito.verify(jsInterpreterMock, Mockito.never()).injectVariable(Mockito.eq("configuration"), Mockito.anyString()); Mockito.verify(jsInterpreterMock).injectStringVariable(Mockito.eq("resourceType"), Mockito.anyString()); 
Mockito.verify(jsInterpreterMock).injectVariable(Mockito.eq("value"), Mockito.anyString()); Mockito.verify(jsInterpreterMock).injectVariable(Mockito.eq("zone"), Mockito.anyString()); diff --git a/framework/quota/src/test/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableHelperTest.java b/framework/quota/src/test/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableHelperTest.java index 7f64939f7bb..e2be3acbbb5 100644 --- a/framework/quota/src/test/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableHelperTest.java +++ b/framework/quota/src/test/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableHelperTest.java @@ -27,6 +27,8 @@ import java.util.List; import java.util.Map; import java.util.Set; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.ClusterDetailsVO; import com.cloud.host.HostTagVO; import com.cloud.hypervisor.Hypervisor; import com.cloud.storage.StoragePoolTagVO; @@ -76,6 +78,7 @@ import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.ProvisioningType; import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.GuestOSDao; @@ -122,6 +125,9 @@ public class PresetVariableHelperTest { @Mock HostTagsDao hostTagsDaoMock; + @Mock + ClusterDetailsDao clusterDetailsDaoMock; + @Mock ImageStoreDao imageStoreDaoMock; @@ -208,7 +214,7 @@ public class PresetVariableHelperTest { value.setComputeOffering(getComputeOfferingForTests()); value.setTags(Collections.singletonMap("tag1", "value1")); value.setTemplate(getGenericPresetVariableForTests()); - value.setDiskOffering(getGenericPresetVariableForTests()); + value.setDiskOffering(getDiskOfferingForTests()); value.setProvisioningType(ProvisioningType.THIN); value.setStorage(getStorageForTests()); 
value.setSize(ByteScaleUtils.GiB); @@ -216,6 +222,7 @@ public class PresetVariableHelperTest { value.setTag("tag_test"); value.setVmSnapshotType(VMSnapshot.Type.Disk); value.setComputingResources(getComputingResourcesForTests()); + value.setVolumeType(Volume.Type.DATADISK); return value; } @@ -232,6 +239,7 @@ public class PresetVariableHelperTest { computeOffering.setId("compute_offering_id"); computeOffering.setName("compute_offering_name"); computeOffering.setCustomized(false); + computeOffering.setOfferHa(false); return computeOffering; } @@ -243,6 +251,14 @@ public class PresetVariableHelperTest { return host; } + private Configuration getConfigurationForTests() { + Configuration configuration = new Configuration(); + configuration.setId("config_id"); + configuration.setName("config_name"); + configuration.setForceHa(false); + return configuration; + } + private List getHostTagsForTests() { return Arrays.asList(new HostTagVO(1, "tag1", false), new HostTagVO(1, "tag2", false)); } @@ -308,6 +324,13 @@ public class PresetVariableHelperTest { return backupOffering; } + private DiskOfferingPresetVariables getDiskOfferingForTests() { + DiskOfferingPresetVariables diskOffering = new DiskOfferingPresetVariables(); + diskOffering.setId("disk_offering_id"); + diskOffering.setName("disk_offering_name"); + return diskOffering; + } + private void mockMethodValidateIfObjectIsNull() { Mockito.doNothing().when(presetVariableHelperSpy).validateIfObjectIsNull(Mockito.any(), Mockito.anyLong(), Mockito.anyString()); } @@ -329,6 +352,7 @@ public class PresetVariableHelperTest { Mockito.doReturn(expected.getAccount()).when(presetVariableHelperSpy).getPresetVariableAccount(Mockito.anyLong()); Mockito.doNothing().when(presetVariableHelperSpy).setPresetVariableProject(Mockito.any()); + Mockito.doNothing().when(presetVariableHelperSpy).setPresetVariableConfiguration(Mockito.any(), Mockito.any()); 
Mockito.doReturn(expected.getDomain()).when(presetVariableHelperSpy).getPresetVariableDomain(Mockito.anyLong()); Mockito.doReturn(expected.getValue()).when(presetVariableHelperSpy).getPresetVariableValue(Mockito.any(UsageVO.class)); Mockito.doReturn(expected.getZone()).when(presetVariableHelperSpy).getPresetVariableZone(Mockito.anyLong()); @@ -352,6 +376,35 @@ public class PresetVariableHelperTest { Assert.assertNull(result.getProject()); } + @Test + public void setPresetVariableConfigurationTestQuotaTypeDifferentFromRunningVmDoNothing() { + getQuotaTypesForTests(UsageTypes.RUNNING_VM).forEach(type -> { + PresetVariables result = new PresetVariables(); + Mockito.doReturn(type.getKey()).when(usageVoMock).getUsageType(); + presetVariableHelperSpy.setPresetVariableConfiguration(result, usageVoMock); + + Assert.assertNull(result.getConfiguration()); + }); + } + + @Test + public void setPresetVariableConfigurationTestQuotaTypeIsRunningVmSetConfiguration() { + PresetVariables result = new PresetVariables(); + Configuration expectedConfig = getConfigurationForTests(); + HostVO hostVoMock = Mockito.mock(HostVO.class); + ClusterDetailsVO clusterDetailsVoMock = Mockito.mock(ClusterDetailsVO.class); + + Mockito.doReturn(vmInstanceVoMock).when(vmInstanceDaoMock).findByIdIncludingRemoved(Mockito.anyLong()); + Mockito.doReturn(hostVoMock).when(hostDaoMock).findByIdIncludingRemoved(Mockito.anyLong()); + Mockito.doReturn(1L).when(vmInstanceVoMock).getHostId(); + Mockito.doReturn(1).when(usageVoMock).getUsageType(); + Mockito.doReturn(clusterDetailsVoMock).when(clusterDetailsDaoMock).findDetail(Mockito.anyLong(), Mockito.anyString()); + presetVariableHelperSpy.setPresetVariableConfiguration(result, usageVoMock); + + Assert.assertNotNull(result.getConfiguration()); + Assert.assertEquals(expectedConfig.getForceHa(), result.getConfiguration().getForceHa()); + } + @Test public void setPresetVariableProjectTestAccountWithoutRoleSetAsProject() { PresetVariables result = new 
PresetVariables(); @@ -627,19 +680,36 @@ public class PresetVariableHelperTest { } @Test - public void getPresetVariableValueComputeOfferingTestSetFieldsAndReturnObject() { + public void getPresetVariableValueComputeOfferingForTestSetFieldsAndReturnObjectForRunningVm() { + ComputeOffering expected = getComputeOfferingForTests(); + Mockito.doReturn(expected.getId()).when(serviceOfferingVoMock).getUuid(); + Mockito.doReturn(expected.getName()).when(serviceOfferingVoMock).getName(); + Mockito.doReturn(expected.isCustomized()).when(serviceOfferingVoMock).isDynamic(); + Mockito.doReturn(expected.offerHa()).when(serviceOfferingVoMock).isOfferHA(); + + ComputeOffering result = presetVariableHelperSpy.getPresetVariableValueComputeOffering(serviceOfferingVoMock, UsageTypes.RUNNING_VM); + + assertPresetVariableIdAndName(expected, result); + Assert.assertEquals(expected.isCustomized(), result.isCustomized()); + Assert.assertEquals(expected.offerHa(), result.offerHa()); + validateFieldNamesToIncludeInToString(Arrays.asList("id", "name", "customized", "offerHa"), result); + } + + @Test + public void getPresetVariableValueComputeOfferingForTestSetFieldsAndReturnObjectForAllocatedVm() { ComputeOffering expected = getComputeOfferingForTests(); Mockito.doReturn(expected.getId()).when(serviceOfferingVoMock).getUuid(); Mockito.doReturn(expected.getName()).when(serviceOfferingVoMock).getName(); Mockito.doReturn(expected.isCustomized()).when(serviceOfferingVoMock).isDynamic(); - ComputeOffering result = presetVariableHelperSpy.getPresetVariableValueComputeOffering(serviceOfferingVoMock); + ComputeOffering result = presetVariableHelperSpy.getPresetVariableValueComputeOffering(serviceOfferingVoMock, UsageTypes.ALLOCATED_VM); assertPresetVariableIdAndName(expected, result); Assert.assertEquals(expected.isCustomized(), result.isCustomized()); validateFieldNamesToIncludeInToString(Arrays.asList("id", "name", "customized"), result); } + @Test public void 
getPresetVariableValueTemplateTestSetValuesAndReturnObject() { VMTemplateVO vmTemplateVoMock = Mockito.mock(VMTemplateVO.class); @@ -698,6 +768,7 @@ public class PresetVariableHelperTest { Mockito.doReturn(expected.getName()).when(volumeVoMock).getName(); Mockito.doReturn(expected.getDiskOffering()).when(presetVariableHelperSpy).getPresetVariableValueDiskOffering(Mockito.anyLong()); Mockito.doReturn(expected.getProvisioningType()).when(volumeVoMock).getProvisioningType(); + Mockito.doReturn(expected.getVolumeType()).when(volumeVoMock).getVolumeType(); Mockito.doReturn(expected.getStorage()).when(presetVariableHelperSpy).getPresetVariableValueStorage(Mockito.anyLong(), Mockito.anyInt()); Mockito.doReturn(expected.getTags()).when(presetVariableHelperSpy).getPresetVariableValueResourceTags(Mockito.anyLong(), Mockito.any(ResourceObjectType.class)); Mockito.doReturn(expected.getSize()).when(volumeVoMock).getSize(); @@ -713,12 +784,13 @@ public class PresetVariableHelperTest { assertPresetVariableIdAndName(expected, result); Assert.assertEquals(expected.getDiskOffering(), result.getDiskOffering()); Assert.assertEquals(expected.getProvisioningType(), result.getProvisioningType()); + Assert.assertEquals(expected.getVolumeType(), result.getVolumeType()); Assert.assertEquals(expected.getStorage(), result.getStorage()); Assert.assertEquals(expected.getTags(), result.getTags()); Assert.assertEquals(expectedSize, result.getSize()); Assert.assertEquals(imageFormat.name(), result.getVolumeFormat()); - validateFieldNamesToIncludeInToString(Arrays.asList("id", "name", "diskOffering", "provisioningType", "storage", "tags", "size", "volumeFormat"), result); + validateFieldNamesToIncludeInToString(Arrays.asList("id", "name", "diskOffering", "provisioningType", "volumeType", "storage", "tags", "size", "volumeFormat"), result); } Mockito.verify(presetVariableHelperSpy, Mockito.times(ImageFormat.values().length)).getPresetVariableValueResourceTags(Mockito.anyLong(), @@ -740,6 +812,7 @@ 
public class PresetVariableHelperTest { Mockito.doReturn(expected.getName()).when(volumeVoMock).getName(); Mockito.doReturn(expected.getDiskOffering()).when(presetVariableHelperSpy).getPresetVariableValueDiskOffering(Mockito.anyLong()); Mockito.doReturn(expected.getProvisioningType()).when(volumeVoMock).getProvisioningType(); + Mockito.doReturn(expected.getVolumeType()).when(volumeVoMock).getVolumeType(); Mockito.doReturn(expected.getTags()).when(presetVariableHelperSpy).getPresetVariableValueResourceTags(Mockito.anyLong(), Mockito.any(ResourceObjectType.class)); Mockito.doReturn(expected.getSize()).when(volumeVoMock).getSize(); Mockito.doReturn(imageFormat).when(volumeVoMock).getFormat(); @@ -754,12 +827,13 @@ public class PresetVariableHelperTest { assertPresetVariableIdAndName(expected, result); Assert.assertEquals(expected.getDiskOffering(), result.getDiskOffering()); Assert.assertEquals(expected.getProvisioningType(), result.getProvisioningType()); + Assert.assertEquals(expected.getVolumeType(), result.getVolumeType()); Assert.assertNull(result.getStorage()); Assert.assertEquals(expected.getTags(), result.getTags()); Assert.assertEquals(expectedSize, result.getSize()); Assert.assertEquals(imageFormat.name(), result.getVolumeFormat()); - validateFieldNamesToIncludeInToString(Arrays.asList("id", "name", "diskOffering", "provisioningType", "tags", "size", "volumeFormat"), result); + validateFieldNamesToIncludeInToString(Arrays.asList("id", "name", "diskOffering", "provisioningType", "volumeType", "tags", "size", "volumeFormat"), result); } Mockito.verify(presetVariableHelperSpy, Mockito.times(ImageFormat.values().length)).getPresetVariableValueResourceTags(Mockito.anyLong(), @@ -772,14 +846,15 @@ public class PresetVariableHelperTest { Mockito.doReturn(diskOfferingVoMock).when(diskOfferingDaoMock).findByIdIncludingRemoved(Mockito.anyLong()); mockMethodValidateIfObjectIsNull(); - GenericPresetVariable expected = getGenericPresetVariableForTests(); + 
DiskOfferingPresetVariables expected = getDiskOfferingForTests(); Mockito.doReturn(expected.getId()).when(diskOfferingVoMock).getUuid(); Mockito.doReturn(expected.getName()).when(diskOfferingVoMock).getName(); GenericPresetVariable result = presetVariableHelperSpy.getPresetVariableValueDiskOffering(1l); assertPresetVariableIdAndName(expected, result); - validateFieldNamesToIncludeInToString(Arrays.asList("id", "name"), result); + validateFieldNamesToIncludeInToString(Arrays.asList("bytesReadBurst", "bytesReadBurstLength", "bytesReadRate", "bytesWriteBurst", "bytesWriteBurstLength", "bytesWriteRate", + "id", "iopsReadBurst", "iopsReadBurstLength", "iopsReadRate", "iopsWriteBurst", "iopsWriteBurstLength", "iopsWriteRate", "name"), result); } @Test @@ -1113,7 +1188,7 @@ public class PresetVariableHelperTest { Mockito.doReturn(serviceOfferingVoMock).when(serviceOfferingDaoMock).findByIdIncludingRemoved(Mockito.anyLong()); mockMethodValidateIfObjectIsNull(); - Mockito.doReturn(expected.getComputeOffering()).when(presetVariableHelperSpy).getPresetVariableValueComputeOffering(Mockito.any()); + Mockito.doReturn(expected.getComputeOffering()).when(presetVariableHelperSpy).getPresetVariableValueComputeOffering(Mockito.any(), Mockito.anyInt()); Mockito.doReturn(expected.getComputingResources()).when(presetVariableHelperSpy).getPresetVariableValueComputingResource(Mockito.any(), Mockito.any()); QuotaTypes.listQuotaTypes().forEach((typeInt, value) -> { diff --git a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycle.java b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycle.java index b913033259c..170e3b40e94 100644 --- a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycle.java +++ b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycle.java @@ -39,7 +39,7 @@ 
import com.cloud.utils.mgmt.ManagementBean; public class CloudStackExtendedLifeCycle extends AbstractBeanCollector { - Map> sorted = new TreeMap>(); + Map> sorted = new TreeMap<>(); public CloudStackExtendedLifeCycle() { super(); @@ -80,13 +80,8 @@ public class CloudStackExtendedLifeCycle extends AbstractBeanCollector { ManagementBean mbean = (ManagementBean)lifecycle; try { JmxUtil.registerMBean(mbean); - } catch (MalformedObjectNameException e) { - logger.warn("Unable to register MBean: " + mbean.getName(), e); - } catch (InstanceAlreadyExistsException e) { - logger.warn("Unable to register MBean: " + mbean.getName(), e); - } catch (MBeanRegistrationException e) { - logger.warn("Unable to register MBean: " + mbean.getName(), e); - } catch (NotCompliantMBeanException e) { + } catch (MalformedObjectNameException | InstanceAlreadyExistsException | + MBeanRegistrationException | NotCompliantMBeanException e) { logger.warn("Unable to register MBean: " + mbean.getName(), e); } logger.info("Registered MBean: " + mbean.getName()); @@ -129,6 +124,7 @@ public class CloudStackExtendedLifeCycle extends AbstractBeanCollector { throw new CloudRuntimeException(e); } catch (Exception e) { logger.error("Error on configuring bean {} - {}", lifecycle.getName(), e.getMessage(), e); + throw new CloudRuntimeException(e); } } }); @@ -141,7 +137,7 @@ public class CloudStackExtendedLifeCycle extends AbstractBeanCollector { Set set = sorted.get(lifecycle.getRunLevel()); if (set == null) { - set = new HashSet(); + set = new HashSet<>(); sorted.put(lifecycle.getRunLevel(), set); } @@ -169,12 +165,7 @@ public class CloudStackExtendedLifeCycle extends AbstractBeanCollector { } } - @Override - public int getPhase() { - return 2000; - } - - private static interface WithComponentLifeCycle { + private interface WithComponentLifeCycle { public void with(ComponentLifecycle lifecycle); } } diff --git 
a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/RegistryLifecycle.java b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/RegistryLifecycle.java index 19d1fe3acc5..00e19304657 100644 --- a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/RegistryLifecycle.java +++ b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/RegistryLifecycle.java @@ -48,7 +48,7 @@ public class RegistryLifecycle implements BeanPostProcessor, SmartLifecycle, App * can use this. */ String registryBeanName; - Set beans = new HashSet(); + Set beans = new HashSet<>(); Class typeClass; ApplicationContext applicationContext; Set excludes = null; @@ -79,7 +79,7 @@ public class RegistryLifecycle implements BeanPostProcessor, SmartLifecycle, App protected synchronized void loadExcluded() { Properties props = applicationContext.getBean("DefaultConfigProperties", Properties.class); - excludes = new HashSet(); + excludes = new HashSet<>(); for (String exclude : props.getProperty(EXTENSION_EXCLUDE, "").trim().split("\\s*,\\s*")) { if (StringUtils.hasText(exclude)) { excludes.add(exclude); @@ -109,10 +109,15 @@ public class RegistryLifecycle implements BeanPostProcessor, SmartLifecycle, App while (iter.hasNext()) { Object next = iter.next(); - if (registry.register(next)) { - logger.debug("Registered " + next); - } else { - iter.remove(); + try { + if (registry.register(next)) { + logger.debug("Registered " + next); + } else { + logger.warn("Bean registration failed for " + next.toString()); + iter.remove(); + } + } catch (Throwable e) { + logger.warn("Bean registration attempt resulted in an exception for " + next.toString(), e); } } } diff --git a/packaging/el8/cloud.spec b/packaging/el8/cloud.spec index fbbb7abe350..244f4431a3b 100644 --- a/packaging/el8/cloud.spec +++ b/packaging/el8/cloud.spec @@ -71,7 +71,7 @@ Requires: (openssh-clients or 
openssh) Requires: (nfs-utils or nfs-client) Requires: iproute Requires: wget -Requires: mysql +Requires: (mysql or mariadb) Requires: sudo Requires: /sbin/service Requires: /sbin/chkconfig diff --git a/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java b/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java index db40b6e68dd..6bbd25bb440 100644 --- a/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java +++ b/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java @@ -26,33 +26,37 @@ import java.util.Set; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.acl.RolePermissionEntity.Permission; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.utils.cache.LazyCache; +import org.apache.commons.lang3.StringUtils; import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.UnavailableCommandException; import com.cloud.user.Account; import com.cloud.user.AccountService; import com.cloud.user.User; +import com.cloud.utils.Pair; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.component.PluggableService; -import org.apache.commons.lang3.StringUtils; public class DynamicRoleBasedAPIAccessChecker extends AdapterBase implements APIAclChecker { - @Inject private AccountService accountService; @Inject private RoleService roleService; private List services; - private Map> annotationRoleBasedApisMap = new HashMap>(); + private Map> annotationRoleBasedApisMap = new HashMap<>(); + private LazyCache accountCache; + private LazyCache>> rolePermissionsCache; + private int cachePeriod; protected DynamicRoleBasedAPIAccessChecker() { super(); for (RoleType roleType : RoleType.values()) { - 
annotationRoleBasedApisMap.put(roleType, new HashSet()); + annotationRoleBasedApisMap.put(roleType, new HashSet<>()); } } @@ -99,23 +103,66 @@ public class DynamicRoleBasedAPIAccessChecker extends AdapterBase implements API annotationRoleBasedApisMap.get(role.getRoleType()).contains(apiName); } + protected Account getAccountFromId(long accountId) { + return accountService.getAccount(accountId); + } + + protected Pair> getRolePermissions(long roleId) { + final Role accountRole = roleService.findRole(roleId); + if (accountRole == null || accountRole.getId() < 1L) { + return new Pair<>(null, null); + } + + if (accountRole.getRoleType() == RoleType.Admin && accountRole.getId() == RoleType.Admin.getId()) { + return new Pair<>(accountRole, null); + } + + return new Pair<>(accountRole, roleService.findAllPermissionsBy(accountRole.getId())); + } + + protected Pair> getRolePermissionsUsingCache(long roleId) { + if (cachePeriod > 0) { + return rolePermissionsCache.get(roleId); + } + return getRolePermissions(roleId); + } + + protected Account getAccountFromIdUsingCache(long accountId) { + if (cachePeriod > 0) { + return accountCache.get(accountId); + } + return getAccountFromId(accountId); + } + @Override public boolean checkAccess(User user, String commandName) throws PermissionDeniedException { if (!isEnabled()) { return true; } - - Account account = accountService.getAccount(user.getAccountId()); + Account account = getAccountFromIdUsingCache(user.getAccountId()); if (account == null) { - throw new PermissionDeniedException(String.format("The account id [%s] for user id [%s] is null.", user.getAccountId(), user.getUuid())); + throw new PermissionDeniedException(String.format("Account for user id [%s] cannot be found", user.getUuid())); } - - return checkAccess(account, commandName); + Pair> roleAndPermissions = getRolePermissionsUsingCache(account.getRoleId()); + final Role accountRole = roleAndPermissions.first(); + if (accountRole == null) { + throw new 
PermissionDeniedException(String.format("Account role for user id [%s] cannot be found.", user.getUuid())); + } + if (accountRole.getRoleType() == RoleType.Admin && accountRole.getId() == RoleType.Admin.getId()) { + logger.info("Account for user id {} is Root Admin or Domain Admin, all APIs are allowed.", user.getUuid()); + return true; + } + List allPermissions = roleAndPermissions.second(); + if (checkApiPermissionByRole(accountRole, commandName, allPermissions)) { + return true; + } + throw new UnavailableCommandException(String.format("The API [%s] does not exist or is not available for the account for user id [%s].", commandName, user.getUuid())); } public boolean checkAccess(Account account, String commandName) { - final Role accountRole = roleService.findRole(account.getRoleId()); - if (accountRole == null || accountRole.getId() < 1L) { + Pair> roleAndPermissions = getRolePermissionsUsingCache(account.getRoleId()); + final Role accountRole = roleAndPermissions.first(); + if (accountRole == null) { throw new PermissionDeniedException(String.format("The account [%s] has role null or unknown.", account)); } @@ -160,6 +207,9 @@ public class DynamicRoleBasedAPIAccessChecker extends AdapterBase implements API @Override public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); + cachePeriod = Math.max(0, RoleService.DynamicApiCheckerCachePeriod.value()); + accountCache = new LazyCache<>(32, cachePeriod, this::getAccountFromId); + rolePermissionsCache = new LazyCache<>(32, cachePeriod, this::getRolePermissions); return true; } diff --git a/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java b/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java index 0ed658aa70d..667b475eada 100644 --- 
a/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java +++ b/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java @@ -321,13 +321,13 @@ public class ExplicitDedicationProcessor extends AffinityProcessorBase implement } } //add all hosts inside this in includeList - List hostList = _hostDao.listByDataCenterId(dr.getDataCenterId()); - for (HostVO host : hostList) { - DedicatedResourceVO dHost = _dedicatedDao.findByHostId(host.getId()); + List hostList = _hostDao.listEnabledIdsByDataCenterId(dr.getDataCenterId()); + for (Long hostId : hostList) { + DedicatedResourceVO dHost = _dedicatedDao.findByHostId(hostId); if (dHost != null && !dedicatedResources.contains(dHost)) { - avoidList.addHost(host.getId()); + avoidList.addHost(hostId); } else { - includeList.addHost(host.getId()); + includeList.addHost(hostId); } } } @@ -337,7 +337,7 @@ public class ExplicitDedicationProcessor extends AffinityProcessorBase implement List pods = _podDao.listByDataCenterId(dc.getId()); List clusters = _clusterDao.listClustersByDcId(dc.getId()); - List hosts = _hostDao.listByDataCenterId(dc.getId()); + List hostIds = _hostDao.listEnabledIdsByDataCenterId(dc.getId()); Set podsInIncludeList = includeList.getPodsToAvoid(); Set clustersInIncludeList = includeList.getClustersToAvoid(); Set hostsInIncludeList = includeList.getHostsToAvoid(); @@ -357,9 +357,9 @@ public class ExplicitDedicationProcessor extends AffinityProcessorBase implement } } - for (HostVO host : hosts) { - if (hostsInIncludeList != null && !hostsInIncludeList.contains(host.getId())) { - avoidList.addHost(host.getId()); + for (Long hostId : hostIds) { + if (hostsInIncludeList != null && !hostsInIncludeList.contains(hostId)) { + avoidList.addHost(hostId); } } return avoidList; diff --git 
a/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java b/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java index d4b3cff0f5c..6935d177c72 100644 --- a/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java +++ b/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java @@ -24,6 +24,7 @@ import java.util.Map; import javax.inject.Inject; +import com.cloud.configuration.Resource; import com.cloud.storage.dao.VolumeDao; import org.apache.cloudstack.backup.dao.BackupDao; @@ -99,6 +100,16 @@ public class DummyBackupProvider extends AdapterBase implements BackupProvider { return metrics; } + @Override + public List listRestorePoints(VirtualMachine vm) { + return null; + } + + @Override + public Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm, Backup.Metric metric) { + return null; + } + @Override public boolean removeVMFromBackupOffering(VirtualMachine vm) { logger.debug(String.format("Removing VM %s from backup offering by the Dummy Backup Provider", vm)); @@ -111,7 +122,7 @@ public class DummyBackupProvider extends AdapterBase implements BackupProvider { } @Override - public boolean takeBackup(VirtualMachine vm) { + public Pair takeBackup(VirtualMachine vm) { logger.debug(String.format("Starting backup for VM %s on Dummy provider", vm)); BackupVO backup = new BackupVO(); @@ -119,23 +130,20 @@ public class DummyBackupProvider extends AdapterBase implements BackupProvider { backup.setExternalId("dummy-external-id"); backup.setType("FULL"); backup.setDate(new Date()); - backup.setSize(1024L); - backup.setProtectedSize(1024000L); + backup.setSize(1024000L); + backup.setProtectedSize(1 * Resource.ResourceType.bytesToGiB); backup.setStatus(Backup.Status.BackedUp); backup.setBackupOfferingId(vm.getBackupOfferingId()); backup.setAccountId(vm.getAccountId()); 
backup.setDomainId(vm.getDomainId()); backup.setZoneId(vm.getDataCenterId()); backup.setBackedUpVolumes(BackupManagerImpl.createVolumeInfoFromVolumes(volumeDao.findByInstance(vm.getId()))); - return backupDao.persist(backup) != null; + backup = backupDao.persist(backup); + return new Pair<>(true, backup); } @Override public boolean deleteBackup(Backup backup, boolean forced) { return true; } - - @Override - public void syncBackups(VirtualMachine vm, Backup.Metric metric) { - } } diff --git a/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java b/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java index 5d3d1a91933..f148c53e614 100644 --- a/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java +++ b/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java @@ -46,6 +46,7 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.commons.collections.CollectionUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; + import javax.inject.Inject; import java.text.SimpleDateFormat; import java.util.ArrayList; @@ -141,7 +142,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co } @Override - public boolean takeBackup(final VirtualMachine vm) { + public Pair takeBackup(final VirtualMachine vm) { final Host host = getVMHypervisorHost(vm); final BackupRepository backupRepository = backupRepositoryDao.findByBackupOfferingId(vm.getBackupOfferingId()); @@ -179,12 +180,16 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co backupVO.setSize(answer.getSize()); backupVO.setStatus(Backup.Status.BackedUp); backupVO.setBackedUpVolumes(BackupManagerImpl.createVolumeInfoFromVolumes(volumeDao.findByInstance(vm.getId()))); - return backupDao.update(backupVO.getId(), backupVO); + if (backupDao.update(backupVO.getId(), backupVO)) { + return new 
Pair<>(true, backupVO); + } else { + throw new CloudRuntimeException("Failed to update backup"); + } } else { backupVO.setStatus(Backup.Status.Failed); backupDao.remove(backupVO.getId()); + return new Pair<>(false, null); } - return Objects.nonNull(answer) && answer.getResult(); } private BackupVO createBackupObject(VirtualMachine vm, String backupPath) { @@ -358,6 +363,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co return backupDao.remove(backup.getId()); } + logger.debug("There was an error removing the backup with id " + backup.getId()); return false; } @@ -383,6 +389,16 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co return metrics; } + @Override + public List listRestorePoints(VirtualMachine vm) { + return null; + } + + @Override + public Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm, Backup.Metric metric) { + return null; + } + @Override public boolean assignVMToBackupOffering(VirtualMachine vm, BackupOffering backupOffering) { return Hypervisor.HypervisorType.KVM.equals(vm.getHypervisorType()); @@ -398,11 +414,6 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co return false; } - @Override - public void syncBackups(VirtualMachine vm, Backup.Metric metric) { - // TODO: check and sum/return backups metrics on per VM basis - } - @Override public List listBackupOfferings(Long zoneId) { final List repositories = backupRepositoryDao.listByZoneAndProvider(zoneId, getName()); diff --git a/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java b/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java index 393e2911ac3..822688a86a3 100644 --- a/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java +++ 
b/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java @@ -29,15 +29,11 @@ import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; import com.cloud.utils.component.AdapterBase; -import com.cloud.utils.db.Transaction; -import com.cloud.utils.db.TransactionCallbackNoReturn; -import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.ssh.SshHelper; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.dao.VMInstanceDao; -import org.apache.cloudstack.api.InternalIdentity; import org.apache.cloudstack.backup.dao.BackupDao; import org.apache.cloudstack.backup.dao.BackupOfferingDaoImpl; import org.apache.cloudstack.backup.networker.NetworkerClient; @@ -462,7 +458,7 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid } @Override - public boolean takeBackup(VirtualMachine vm) { + public Pair takeBackup(VirtualMachine vm) { String networkerServer; String clusterName; @@ -514,11 +510,11 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid if (backup != null) { backup.setBackedUpVolumes(BackupManagerImpl.createVolumeInfoFromVolumes(volumeDao.findByInstance(vm.getId()))); backupDao.persist(backup); - return true; + return new Pair<>(true, backup); } else { LOG.error("Could not register backup for vm {} with saveset Time: {}", vm, saveTime); // We need to handle this rare situation where backup is successful but can't be registered properly. 
- return false; + return new Pair<>(false, null); } } @@ -532,7 +528,7 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid LOG.debug("EMC Networker successfully deleted backup with id " + externalBackupId); return true; } else { - LOG.debug("There was an error removing the backup with id " + externalBackupId + " from EMC NEtworker"); + LOG.debug("There was an error removing the backup with id " + externalBackupId + " from EMC Networker"); } return false; } @@ -550,12 +546,12 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid for (final VirtualMachine vm : vms) { for ( Backup.VolumeInfo thisVMVol : vm.getBackupVolumeList()) { - vmBackupSize += (thisVMVol.getSize() / 1024L / 1024L); + vmBackupProtectedSize += (thisVMVol.getSize() / 1024L / 1024L); } final ArrayList vmBackups = getClient(zoneId).getBackupsForVm(vm); for ( String vmBackup : vmBackups ) { NetworkerBackup vmNwBackup = getClient(zoneId).getNetworkerBackupInfo(vmBackup); - vmBackupProtectedSize+= vmNwBackup.getSize().getValue() / 1024L; + vmBackupSize += vmNwBackup.getSize().getValue() / 1024L; } Backup.Metric vmBackupMetric = new Backup.Metric(vmBackupSize,vmBackupProtectedSize); LOG.debug(String.format("Metrics for VM [%s] is [backup size: %s, data size: %s].", vm, vmBackupMetric.getBackupSize(), vmBackupMetric.getDataSize())); @@ -565,83 +561,53 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid } @Override - public void syncBackups(VirtualMachine vm, Backup.Metric metric) { - final Long zoneId = vm.getDataCenterId(); - Transaction.execute(new TransactionCallbackNoReturn() { - @Override - public void doInTransactionWithoutResult(TransactionStatus status) { - final List backupsInDb = backupDao.listByVmId(null, vm.getId()); - final ArrayList backupsInNetworker = getClient(zoneId).getBackupsForVm(vm); - final List removeList = backupsInDb.stream().map(InternalIdentity::getId).collect(Collectors.toList()); - 
for (final String networkerBackupId : backupsInNetworker ) { - Long vmBackupSize=0L; - boolean backupExists = false; - for (final Backup backupInDb : backupsInDb) { - LOG.debug(String.format("Checking if Backup %s with external ID %s for VM %s is valid", backupsInDb, backupInDb.getName(), vm)); - if ( networkerBackupId.equals(backupInDb.getExternalId()) ) { - LOG.debug(String.format("Found Backup %s in both Database and Networker", backupInDb)); - backupExists = true; - removeList.remove(backupInDb.getId()); - if (metric != null) { - LOG.debug(String.format("Update backup [%s] from [size: %s, protected size: %s] to [size: %s, protected size: %s].", - backupInDb, backupInDb.getSize(), backupInDb.getProtectedSize(), - metric.getBackupSize(), metric.getDataSize())); - ((BackupVO) backupInDb).setSize(metric.getBackupSize()); - ((BackupVO) backupInDb).setProtectedSize(metric.getDataSize()); - backupDao.update(backupInDb.getId(), ((BackupVO) backupInDb)); - } - break; - } - } - if (backupExists) { - continue; - } - // Technically an administrator can manually create a backup for a VM by utilizing the KVM scripts - // with the proper parameters. So we will register any backups taken on the Networker side from - // outside Cloudstack. If ever Networker will support KVM out of the box this functionality also will - // ensure that SLA like backups will be found and registered. - NetworkerBackup strayNetworkerBackup = getClient(vm.getDataCenterId()).getNetworkerBackupInfo(networkerBackupId); - // Since running backups are already present in Networker Server but not completed - // make sure the backup is not in progress at this time. 
- if ( strayNetworkerBackup.getCompletionTime() != null) { - BackupVO strayBackup = new BackupVO(); - strayBackup.setVmId(vm.getId()); - strayBackup.setExternalId(strayNetworkerBackup.getId()); - strayBackup.setType(strayNetworkerBackup.getType()); - SimpleDateFormat formatterDateTime = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ"); - try { - strayBackup.setDate(formatterDateTime.parse(strayNetworkerBackup.getSaveTime())); - } catch (ParseException e) { - String msg = String.format("Unable to parse date [%s].", strayNetworkerBackup.getSaveTime()); - LOG.error(msg, e); - throw new CloudRuntimeException(msg, e); - } - strayBackup.setStatus(Backup.Status.BackedUp); - for ( Backup.VolumeInfo thisVMVol : vm.getBackupVolumeList()) { - vmBackupSize += (thisVMVol.getSize() / 1024L /1024L); - } - strayBackup.setSize(vmBackupSize); - strayBackup.setProtectedSize(strayNetworkerBackup.getSize().getValue() / 1024L ); - strayBackup.setBackupOfferingId(vm.getBackupOfferingId()); - strayBackup.setAccountId(vm.getAccountId()); - strayBackup.setDomainId(vm.getDomainId()); - strayBackup.setZoneId(vm.getDataCenterId()); - LOG.debug(String.format("Creating a new entry in backups: [id: %s, uuid: %s, vm_id: %s, external_id: %s, type: %s, date: %s, backup_offering_id: %s, account_id: %s, " - + "domain_id: %s, zone_id: %s].", strayBackup.getId(), strayBackup.getUuid(), strayBackup.getVmId(), strayBackup.getExternalId(), - strayBackup.getType(), strayBackup.getDate(), strayBackup.getBackupOfferingId(), strayBackup.getAccountId(), - strayBackup.getDomainId(), strayBackup.getZoneId())); - backupDao.persist(strayBackup); - LOG.warn("Added backup found in provider [" + strayBackup + "]"); - } else { - LOG.debug ("Backup is in progress, skipping addition for this run"); - } - } - for (final Long backupIdToRemove : removeList) { - LOG.warn(String.format("Removing backup with ID: [%s].", backupIdToRemove)); - backupDao.remove(backupIdToRemove); - } + public Backup 
createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm, Backup.Metric metric) { + // Technically an administrator can manually create a backup for a VM by utilizing the KVM scripts + // with the proper parameters. So we will register any backups taken on the Networker side from + // outside Cloudstack. If ever Networker will support KVM out of the box this functionality also will + // ensure that SLA like backups will be found and registered. + NetworkerBackup strayNetworkerBackup = getClient(vm.getDataCenterId()).getNetworkerBackupInfo(restorePoint.getId()); + + // Since running backups are already present in Networker Server but not completed + // make sure the backup is not in progress at this time. + if (strayNetworkerBackup.getCompletionTime() != null) { + BackupVO backup = new BackupVO(); + backup.setVmId(vm.getId()); + backup.setExternalId(strayNetworkerBackup.getId()); + backup.setType(strayNetworkerBackup.getType()); + SimpleDateFormat formatterDateTime = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ"); + try { + backup.setDate(formatterDateTime.parse(strayNetworkerBackup.getSaveTime())); + } catch (ParseException e) { + String msg = String.format("Unable to parse date [%s].", strayNetworkerBackup.getSaveTime()); + LOG.error(msg, e); + throw new CloudRuntimeException(msg, e); } - }); + backup.setStatus(Backup.Status.BackedUp); + Long vmBackupProtectedSize=0L; + for (Backup.VolumeInfo thisVMVol : vm.getBackupVolumeList()) { + vmBackupProtectedSize += (thisVMVol.getSize() / 1024L / 1024L); + } + backup.setSize(strayNetworkerBackup.getSize().getValue() / 1024L); + backup.setProtectedSize(vmBackupProtectedSize); + backup.setBackupOfferingId(vm.getBackupOfferingId()); + backup.setAccountId(vm.getAccountId()); + backup.setDomainId(vm.getDomainId()); + backup.setZoneId(vm.getDataCenterId()); + backupDao.persist(backup); + return backup; + } + LOG.debug ("Backup is in progress, skipping addition for this run"); + return null; + } + 
+ @Override + public List listRestorePoints(VirtualMachine vm) { + final Long zoneId = vm.getDataCenterId(); + final ArrayList backupIds = getClient(zoneId).getBackupsForVm(vm); + List restorePoints = + backupIds.stream().map(id -> new Backup.RestorePoint(id, null, null)).collect(Collectors.toList()); + return restorePoints; } @Override diff --git a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java index c120d8bd599..a7ce0c09cc6 100644 --- a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java +++ b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java @@ -29,8 +29,6 @@ import java.util.stream.Collectors; import javax.inject.Inject; -import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.cloudstack.api.InternalIdentity; import org.apache.cloudstack.backup.Backup.Metric; import org.apache.cloudstack.backup.dao.BackupDao; import org.apache.cloudstack.backup.veeam.VeeamClient; @@ -42,20 +40,15 @@ import org.apache.commons.lang3.BooleanUtils; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; -import com.cloud.event.ActionEventUtils; -import com.cloud.event.EventTypes; -import com.cloud.event.EventVO; import com.cloud.hypervisor.Hypervisor; import com.cloud.dc.VmwareDatacenter; import com.cloud.hypervisor.vmware.VmwareDatacenterZoneMap; import com.cloud.dc.dao.VmwareDatacenterDao; import com.cloud.hypervisor.vmware.dao.VmwareDatacenterZoneMapDao; +import com.cloud.storage.dao.VolumeDao; import com.cloud.user.User; import com.cloud.utils.Pair; import com.cloud.utils.component.AdapterBase; -import com.cloud.utils.db.Transaction; -import com.cloud.utils.db.TransactionCallbackNoReturn; -import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VMInstanceVO; import 
com.cloud.vm.VirtualMachine; @@ -109,6 +102,8 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, private AgentManager agentMgr; @Inject private VirtualMachineManager virtualMachineManager; + @Inject + private VolumeDao volumeDao; protected VeeamClient getClient(final Long zoneId) { try { @@ -220,9 +215,10 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, } @Override - public boolean takeBackup(final VirtualMachine vm) { + public Pair takeBackup(final VirtualMachine vm) { final VeeamClient client = getClient(vm.getDataCenterId()); - return client.startBackupJob(vm.getBackupExternalId()); + Boolean result = client.startBackupJob(vm.getBackupExternalId()); + return new Pair<>(result, null); } @Override @@ -322,26 +318,30 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, return metrics; } - private List listRestorePoints(VirtualMachine vm) { - String backupName = getGuestBackupName(vm.getInstanceName(), vm.getUuid()); - return getClient(vm.getDataCenterId()).listRestorePoints(backupName, vm.getInstanceName()); + @Override + public Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm, Backup.Metric metric) { + BackupVO backup = new BackupVO(); + backup.setVmId(vm.getId()); + backup.setExternalId(restorePoint.getId()); + backup.setType(restorePoint.getType()); + backup.setDate(restorePoint.getCreated()); + backup.setStatus(Backup.Status.BackedUp); + if (metric != null) { + backup.setSize(metric.getBackupSize()); + backup.setProtectedSize(metric.getDataSize()); + } + backup.setBackupOfferingId(vm.getBackupOfferingId()); + backup.setAccountId(vm.getAccountId()); + backup.setDomainId(vm.getDomainId()); + backup.setZoneId(vm.getDataCenterId()); + backupDao.persist(backup); + return backup; } - private Backup checkAndUpdateIfBackupEntryExistsForRestorePoint(List backupsInDb, Backup.RestorePoint restorePoint, Backup.Metric metric) { - for 
(final Backup backup : backupsInDb) { - if (restorePoint.getId().equals(backup.getExternalId())) { - if (metric != null) { - logger.debug("Update backup with [id: {}, uuid: {}, name: {}, external id: {}] from [size: {}, protected size: {}] to [size: {}, protected size: {}].", - backup.getId(), backup.getUuid(), backup.getName(), backup.getExternalId(), backup.getSize(), backup.getProtectedSize(), metric.getBackupSize(), metric.getDataSize()); - - ((BackupVO) backup).setSize(metric.getBackupSize()); - ((BackupVO) backup).setProtectedSize(metric.getDataSize()); - backupDao.update(backup.getId(), ((BackupVO) backup)); - } - return backup; - } - } - return null; + @Override + public List listRestorePoints(VirtualMachine vm) { + String backupName = getGuestBackupName(vm.getInstanceName(), vm.getUuid()); + return getClient(vm.getDataCenterId()).listRestorePoints(backupName, vm.getInstanceName()); } @Override @@ -378,6 +378,7 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, backup.setAccountId(vm.getAccountId()); backup.setDomainId(vm.getDomainId()); backup.setZoneId(vm.getDataCenterId()); + backup.setBackedUpVolumes(BackupManagerImpl.createVolumeInfoFromVolumes(volumeDao.findByInstance(vm.getId()))); logger.debug("Creating a new entry in backups: [id: {}, uuid: {}, name: {}, vm_id: {}, external_id: {}, type: {}, date: {}, backup_offering_id: {}, account_id: {}, " + "domain_id: {}, zone_id: {}].", backup.getId(), backup.getUuid(), backup.getName(), backup.getVmId(), backup.getExternalId(), backup.getType(), backup.getDate(), backup.getBackupOfferingId(), backup.getAccountId(), backup.getDomainId(), backup.getZoneId()); diff --git a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java index d911736090c..9accc0714de 100644 --- a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java +++ 
b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java @@ -844,11 +844,11 @@ public class VeeamClient { "if ($restore) { $restore ^| Format-List } }" ); Pair response = executePowerShellCommands(cmds); - final List restorePoints = new ArrayList<>(); if (response == null || !response.first()) { - return restorePoints; + return null; } + final List restorePoints = new ArrayList<>(); for (final String block : response.second().split("\r\n\r\n")) { if (block.isEmpty()) { continue; diff --git a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java index cd7dc2bbbad..1f020726793 100644 --- a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java +++ b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java @@ -23,7 +23,6 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.commons.lang3.StringUtils; import org.apache.cloudstack.affinity.AffinityGroup; import org.apache.cloudstack.affinity.AffinityGroupService; import org.apache.cloudstack.affinity.dao.AffinityGroupDao; @@ -45,8 +44,9 @@ import org.apache.cloudstack.api.response.DedicatePodResponse; import org.apache.cloudstack.api.response.DedicateZoneResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.logging.log4j.Logger; +import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.configuration.Config; @@ -126,7 +126,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { @ActionEvent(eventType = 
EventTypes.EVENT_DEDICATE_RESOURCE, eventDescription = "dedicating a Zone") public List dedicateZone(final Long zoneId, final Long domainId, final String accountName) { Long accountId = null; - List hosts = null; + List hostIds = null; if (accountName != null) { Account caller = CallContext.current().getCallingAccount(); Account owner = _accountMgr.finalizeOwner(caller, accountName, domainId, null); @@ -203,18 +203,20 @@ public class DedicatedResourceManagerImpl implements DedicatedService { releaseDedicatedResource(null, null, dr.getClusterId(), null); } - hosts = _hostDao.listByDataCenterId(dc.getId()); - for (HostVO host : hosts) { - DedicatedResourceVO dHost = _dedicatedDao.findByHostId(host.getId()); + hostIds = _hostDao.listEnabledIdsByDataCenterId(dc.getId()); + for (Long hostId : hostIds) { + DedicatedResourceVO dHost = _dedicatedDao.findByHostId(hostId); if (dHost != null) { if (!(childDomainIds.contains(dHost.getDomainId()))) { + HostVO host = _hostDao.findById(hostId); throw new CloudRuntimeException("Host " + host.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); } if (accountId != null) { if (dHost.getAccountId().equals(accountId)) { hostsToRelease.add(dHost); } else { - logger.error(String.format("Host %s under this Zone %s is dedicated to different account/domain", host, dc)); + HostVO host = _hostDao.findById(hostId); + logger.error("{} under {} is dedicated to different account/domain", host, dc); throw new CloudRuntimeException("Host " + host.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); } } else { @@ -230,7 +232,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { } } - checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hosts); + checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hostIds); final Long accountIdFinal = accountId; return Transaction.execute(new TransactionCallback>() { @@ -284,7 
+286,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { childDomainIds.add(domainId); checkAccountAndDomain(accountId, domainId); HostPodVO pod = _podDao.findById(podId); - List hosts = null; + List hostIds = null; if (pod == null) { throw new InvalidParameterValueException("Unable to find pod by id " + podId); } else { @@ -339,18 +341,20 @@ public class DedicatedResourceManagerImpl implements DedicatedService { releaseDedicatedResource(null, null, dr.getClusterId(), null); } - hosts = _hostDao.findByPodId(pod.getId()); - for (HostVO host : hosts) { - DedicatedResourceVO dHost = _dedicatedDao.findByHostId(host.getId()); + hostIds = _hostDao.listIdsByPodId(pod.getId()); + for (Long hostId : hostIds) { + DedicatedResourceVO dHost = _dedicatedDao.findByHostId(hostId); if (dHost != null) { if (!(getDomainChildIds(domainId).contains(dHost.getDomainId()))) { + HostVO host = _hostDao.findById(hostId); throw new CloudRuntimeException("Host " + host.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); } if (accountId != null) { if (dHost.getAccountId().equals(accountId)) { hostsToRelease.add(dHost); } else { - logger.error(String.format("Host %s under this Pod %s is dedicated to different account/domain", host, pod)); + HostVO host = _hostDao.findById(hostId); + logger.error("{} under this {} is dedicated to different account/domain", host, pod); throw new CloudRuntimeException("Host " + host.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); } } else { @@ -366,7 +370,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { } } - checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hosts); + checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hostIds); final Long accountIdFinal = accountId; return Transaction.execute(new TransactionCallback>() { @@ -402,7 +406,7 @@ public class DedicatedResourceManagerImpl 
implements DedicatedService { @ActionEvent(eventType = EventTypes.EVENT_DEDICATE_RESOURCE, eventDescription = "dedicating a Cluster") public List dedicateCluster(final Long clusterId, final Long domainId, final String accountName) { Long accountId = null; - List hosts = null; + List hostIds = null; if (accountName != null) { Account caller = CallContext.current().getCallingAccount(); Account owner = _accountMgr.finalizeOwner(caller, accountName, domainId, null); @@ -448,12 +452,13 @@ public class DedicatedResourceManagerImpl implements DedicatedService { } //check if any resource under this cluster is dedicated to different account or sub-domain - hosts = _hostDao.findByClusterId(cluster.getId()); + hostIds = _hostDao.listIdsByClusterId(cluster.getId()); List hostsToRelease = new ArrayList(); - for (HostVO host : hosts) { - DedicatedResourceVO dHost = _dedicatedDao.findByHostId(host.getId()); + for (Long hostId : hostIds) { + DedicatedResourceVO dHost = _dedicatedDao.findByHostId(hostId); if (dHost != null) { if (!(childDomainIds.contains(dHost.getDomainId()))) { + HostVO host = _hostDao.findById(hostId); throw new CloudRuntimeException("Host " + host.getName() + " under this Cluster " + cluster.getName() + " is dedicated to different account/domain"); } @@ -479,7 +484,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { } } - checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hosts); + checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hostIds); final Long accountIdFinal = accountId; return Transaction.execute(new TransactionCallback>() { @@ -576,7 +581,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { List childDomainIds = getDomainChildIds(domainId); childDomainIds.add(domainId); - checkHostSuitabilityForExplicitDedication(accountId, childDomainIds, host); + checkHostSuitabilityForExplicitDedication(accountId, childDomainIds, host.getId()); final Long accountIdFinal = 
accountId; return Transaction.execute(new TransactionCallback>() { @@ -662,13 +667,14 @@ public class DedicatedResourceManagerImpl implements DedicatedService { return vms; } - private boolean checkHostSuitabilityForExplicitDedication(Long accountId, List domainIds, Host host) { + private boolean checkHostSuitabilityForExplicitDedication(Long accountId, List domainIds, long hostId) { boolean suitable = true; - List allVmsOnHost = getVmsOnHost(host.getId()); + List allVmsOnHost = getVmsOnHost(hostId); if (accountId != null) { for (UserVmVO vm : allVmsOnHost) { if (vm.getAccountId() != accountId) { - logger.info(String.format("Host %s found to be unsuitable for explicit dedication as it is running instances of another account", host)); + Host host = _hostDao.findById(hostId); + logger.info("{} found to be unsuitable for explicit dedication as it is running instances of another account", host); throw new CloudRuntimeException("Host " + host.getUuid() + " found to be unsuitable for explicit dedication as it is " + "running instances of another account"); } @@ -676,7 +682,8 @@ public class DedicatedResourceManagerImpl implements DedicatedService { } else { for (UserVmVO vm : allVmsOnHost) { if (!domainIds.contains(vm.getDomainId())) { - logger.info(String.format("Host %s found to be unsuitable for explicit dedication as it is running instances of another domain", host)); + Host host = _hostDao.findById(hostId); + logger.info("{} found to be unsuitable for explicit dedication as it is running instances of another domain", host); throw new CloudRuntimeException("Host " + host.getUuid() + " found to be unsuitable for explicit dedication as it is " + "running instances of another domain"); } @@ -685,10 +692,10 @@ public class DedicatedResourceManagerImpl implements DedicatedService { return suitable; } - private boolean checkHostsSuitabilityForExplicitDedication(Long accountId, List domainIds, List hosts) { + private boolean checkHostsSuitabilityForExplicitDedication(Long 
accountId, List domainIds, List hostIds) { boolean suitable = true; - for (HostVO host : hosts) { - checkHostSuitabilityForExplicitDedication(accountId, domainIds, host); + for (Long hostId : hostIds) { + checkHostSuitabilityForExplicitDedication(accountId, domainIds, hostId); } return suitable; } diff --git a/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java b/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java index b971b3b8596..f9cde2ae441 100644 --- a/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java +++ b/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java @@ -21,14 +21,15 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.commons.collections.CollectionUtils; import com.cloud.configuration.Config; import com.cloud.exception.InsufficientServerCapacityException; -import com.cloud.host.HostVO; import com.cloud.resource.ResourceManager; import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; @@ -38,7 +39,6 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachineProfile; -import org.springframework.util.CollectionUtils; public class ImplicitDedicationPlanner extends FirstFitPlanner implements DeploymentClusterPlanner { @@ -73,12 +73,11 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements Deploy boolean preferred = isServiceOfferingUsingPlannerInPreferredMode(vmProfile.getServiceOfferingId()); // Get the list of all the hosts in the given clusters - List allHosts = new ArrayList(); - for (Long cluster : clusterList) { - 
List hostsInCluster = resourceMgr.listAllHostsInCluster(cluster); - for (HostVO hostVO : hostsInCluster) { - allHosts.add(hostVO.getId()); - } + List allHosts = new ArrayList<>(); + if (CollectionUtils.isNotEmpty(clusterList)) { + allHosts = clusterList.stream() + .flatMap(cluster -> hostDao.listIdsByClusterId(cluster).stream()) + .collect(Collectors.toList()); } // Go over all the hosts in the cluster and get a list of @@ -224,20 +223,15 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements Deploy } private List getUpdatedClusterList(List clusterList, Set hostsSet) { - List updatedClusterList = new ArrayList(); - for (Long cluster : clusterList) { - List hosts = resourceMgr.listAllHostsInCluster(cluster); - Set hostsInClusterSet = new HashSet(); - for (HostVO host : hosts) { - hostsInClusterSet.add(host.getId()); - } - - if (!hostsSet.containsAll(hostsInClusterSet)) { - updatedClusterList.add(cluster); - } + if (CollectionUtils.isEmpty(clusterList)) { + return new ArrayList<>(); } - - return updatedClusterList; + return clusterList.stream() + .filter(cluster -> { + Set hostsInClusterSet = new HashSet<>(hostDao.listIdsByClusterId(cluster)); + return !hostsSet.containsAll(hostsInClusterSet); + }) + .collect(Collectors.toList()); } @Override @@ -257,15 +251,11 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements Deploy Account account = vmProfile.getOwner(); // Get the list of all the hosts in the given clusters - List allHosts = new ArrayList(); - if (!CollectionUtils.isEmpty(clusterList)) { - for (Long cluster : clusterList) { - List hostsInCluster = resourceMgr.listAllHostsInCluster(cluster); - for (HostVO hostVO : hostsInCluster) { - - allHosts.add(hostVO.getId()); - } - } + List allHosts = new ArrayList<>(); + if (CollectionUtils.isNotEmpty(clusterList)) { + allHosts = clusterList.stream() + .flatMap(cluster -> hostDao.listIdsByClusterId(cluster).stream()) + .collect(Collectors.toList()); } // Go over all the hosts 
in the cluster and get a list of // 1. All empty hosts, not running any vms. diff --git a/plugins/deployment-planners/implicit-dedication/src/test/java/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java b/plugins/deployment-planners/implicit-dedication/src/test/java/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java index e174824cfdd..2d2b4c78261 100644 --- a/plugins/deployment-planners/implicit-dedication/src/test/java/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java +++ b/plugins/deployment-planners/implicit-dedication/src/test/java/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java @@ -16,11 +16,11 @@ // under the License. package org.apache.cloudstack.implicitplanner; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.everyItem; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.everyItem; -import static org.hamcrest.Matchers.equalTo; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -36,7 +36,11 @@ import java.util.UUID; import javax.inject.Inject; -import com.cloud.user.User; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.test.utils.SpringUtils; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -54,12 +58,6 @@ import org.springframework.test.context.ContextConfiguration; import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; import org.springframework.test.context.support.AnnotationConfigContextLoader; -import 
org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.test.utils.SpringUtils; - import com.cloud.capacity.Capacity; import com.cloud.capacity.CapacityManager; import com.cloud.capacity.dao.CapacityDao; @@ -73,7 +71,6 @@ import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.deploy.ImplicitDedicationPlanner; import com.cloud.exception.InsufficientServerCapacityException; import com.cloud.gpu.dao.HostGpuGroupsDao; -import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.host.dao.HostDetailsDao; import com.cloud.host.dao.HostTagsDao; @@ -90,6 +87,7 @@ import com.cloud.storage.dao.VolumeDao; import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.AccountVO; +import com.cloud.user.User; import com.cloud.user.UserVO; import com.cloud.utils.Pair; import com.cloud.utils.component.ComponentContext; @@ -387,21 +385,9 @@ public class ImplicitPlannerTest { when(serviceOfferingDetailsDao.listDetailsKeyPairs(offeringId)).thenReturn(details); // Initialize hosts in clusters - HostVO host1 = mock(HostVO.class); - when(host1.getId()).thenReturn(5L); - HostVO host2 = mock(HostVO.class); - when(host2.getId()).thenReturn(6L); - HostVO host3 = mock(HostVO.class); - when(host3.getId()).thenReturn(7L); - List hostsInCluster1 = new ArrayList(); - List hostsInCluster2 = new ArrayList(); - List hostsInCluster3 = new ArrayList(); - hostsInCluster1.add(host1); - hostsInCluster2.add(host2); - hostsInCluster3.add(host3); - when(resourceMgr.listAllHostsInCluster(1)).thenReturn(hostsInCluster1); - when(resourceMgr.listAllHostsInCluster(2)).thenReturn(hostsInCluster2); - when(resourceMgr.listAllHostsInCluster(3)).thenReturn(hostsInCluster3); + 
when(hostDao.listIdsByClusterId(1L)).thenReturn(List.of(5L)); + when(hostDao.listIdsByClusterId(2L)).thenReturn(List.of(6L)); + when(hostDao.listIdsByClusterId(3L)).thenReturn(List.of(7L)); // Mock vms on each host. long offeringIdForVmsOfThisAccount = 15L; diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index 47edd2eff18..29374f3e594 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -18,6 +18,7 @@ package com.cloud.hypervisor.kvm.resource; import static com.cloud.host.Host.HOST_INSTANCE_CONVERSION; import static com.cloud.host.Host.HOST_VOLUME_ENCRYPTION; +import static org.apache.cloudstack.utils.linux.KVMHostInfo.isHostS390x; import java.io.BufferedReader; import java.io.File; @@ -244,11 +245,16 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv private static final String LEGACY = "legacy"; private static final String SECURE = "secure"; + /** + * Machine type for s390x architecture + */ + private static final String S390X_VIRTIO_DEVICE = "s390-ccw-virtio"; + /** * Machine type. */ - private static final String PC = "pc"; - private static final String VIRT = "virt"; + private static final String PC = isHostS390x() ? S390X_VIRTIO_DEVICE : "pc"; + private static final String VIRT = isHostS390x() ? S390X_VIRTIO_DEVICE : "virt"; /** * Possible devices to add to VM. @@ -305,6 +311,10 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv * Constant that defines ARM64 (aarch64) guest architectures. */ private static final String AARCH64 = "aarch64"; + /** + * Constant that defines IBM Z Arch (s390x) guest architectures. 
+ */ + private static final String S390X = "s390x"; public static final String RESIZE_NOTIFY_ONLY = "NOTIFYONLY"; public static final String BASEPATH = "/usr/share/cloudstack-common/vms/"; @@ -1796,7 +1806,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv "^dummy", "^lo", "^p\\d+p\\d+", - "^vni" + "^vni", + "^enc" }; /** @@ -2642,12 +2653,15 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } devices.addDevice(createChannelDef(vmTO)); - devices.addDevice(createWatchDogDef()); + if (!isGuestS390x()) { + devices.addDevice(createWatchDogDef()); + } devices.addDevice(createVideoDef(vmTO)); devices.addDevice(createConsoleDef()); devices.addDevice(createGraphicDef(vmTO)); - devices.addDevice(createTabletInputDef()); - + if (!isGuestS390x()) { + devices.addDevice(createTabletInputDef()); + } if (isGuestAarch64()) { createArm64UsbDef(devices); } @@ -2765,7 +2779,9 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv FeaturesDef features = new FeaturesDef(); features.addFeatures(PAE); features.addFeatures(APIC); - features.addFeatures(ACPI); + if (!isHostS390x()) { + features.addFeatures(ACPI); + } if (isUefiEnabled && isSecureBoot) { features.addFeatures(SMM); } @@ -2857,6 +2873,10 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv return AARCH64.equals(guestCpuArch); } + private boolean isGuestS390x() { + return S390X.equals(guestCpuArch); + } + /** * Creates a guest definition from a VM specification. */ @@ -2867,7 +2887,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv guest.setManufacturer(vmTO.getMetadataManufacturer()); guest.setProduct(vmTO.getMetadataProductName()); guest.setGuestArch(guestCpuArch != null ? guestCpuArch : vmTO.getArch()); - guest.setMachineType(isGuestAarch64() ? VIRT : PC); + guest.setMachineType((isGuestAarch64() || isGuestS390x()) ? 
VIRT : PC); guest.setBootType(GuestDef.BootType.BIOS); if (MapUtils.isNotEmpty(customParams)) { if (customParams.containsKey(GuestDef.BootType.UEFI.toString())) { @@ -2881,7 +2901,9 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv guest.setIothreads(customParams.containsKey(VmDetailConstants.IOTHREADS)); } guest.setUuid(uuid); - guest.setBootOrder(GuestDef.BootOrder.CDROM); + if(!isGuestS390x()) { + guest.setBootOrder(GuestDef.BootOrder.CDROM); + } guest.setBootOrder(GuestDef.BootOrder.HARDISK); return guest; } @@ -3122,7 +3144,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv final DiskDef.DiskType diskType = getDiskType(physicalDisk); disk.defISODisk(volPath, devId, isUefiEnabled, diskType); - if (guestCpuArch != null && guestCpuArch.equals("aarch64")) { + if (guestCpuArch != null && (guestCpuArch.equals("aarch64") || guestCpuArch.equals("s390x"))) { disk.setBusType(DiskDef.DiskBus.SCSI); } } else { @@ -3133,7 +3155,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv disk.setLogicalBlockIOSize(pool.getSupportedLogicalBlockSize()); disk.setPhysicalBlockIOSize(pool.getSupportedPhysicalBlockSize()); - if (diskBusType == DiskDef.DiskBus.SCSI ) { + if (diskBusType == DiskDef.DiskBus.SCSI || diskBusType == DiskDef.DiskBus.VIRTIOBLK) { disk.setQemuDriver(true); disk.setDiscard(DiscardType.UNMAP); } @@ -3204,7 +3226,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv disk.setCacheMode(DiskDef.DiskCacheMode.valueOf(volumeObjectTO.getCacheMode().toString().toUpperCase())); } - if (volumeObjectTO.requiresEncryption()) { + if (volumeObjectTO.requiresEncryption() && + pool.getType().encryptionSupportMode() == Storage.EncryptionSupport.Hypervisor ) { String secretUuid = createLibvirtVolumeSecret(conn, volumeObjectTO.getPath(), volumeObjectTO.getPassphrase()); DiskDef.LibvirtDiskEncryptDetails encryptDetails = new 
DiskDef.LibvirtDiskEncryptDetails(secretUuid, QemuObject.EncryptFormat.enumValue(volumeObjectTO.getEncryptFormat())); disk.setLibvirtDiskEncryptDetails(encryptDetails); @@ -3220,7 +3243,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv if (vmSpec.getType() != VirtualMachine.Type.User) { final DiskDef iso = new DiskDef(); iso.defISODisk(sysvmISOPath, DiskDef.DiskType.FILE); - if (guestCpuArch != null && guestCpuArch.equals("aarch64")) { + if (guestCpuArch != null && (guestCpuArch.equals("aarch64") || guestCpuArch.equals("s390x"))) { iso.setBusType(DiskDef.DiskBus.SCSI); } vm.getDevices().addDevice(iso); @@ -4294,7 +4317,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv return DiskDef.DiskBus.VIRTIO; } else if (isUefiEnabled && StringUtils.startsWithAny(platformEmulator, "Windows", "Other")) { return DiskDef.DiskBus.SATA; - } else if (guestCpuArch != null && guestCpuArch.equals("aarch64")) { + } else if (guestCpuArch != null && (guestCpuArch.equals("aarch64") || guestCpuArch.equals("s390x"))) { return DiskDef.DiskBus.SCSI; } else { return DiskDef.DiskBus.IDE; diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java index 39373ab6e3b..1c504b6239f 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java @@ -248,7 +248,9 @@ public class LibvirtVMDef { guestDef.append("\n"); } } - guestDef.append("\n"); + if (_arch == null || ! (_arch.equals("aarch64") || _arch.equals("s390x"))) { // simplification of (as ref.) 
(!(_arch != null && _arch.equals("s390x")) || (_arch == null || !_arch.equals("aarch64"))) + guestDef.append("\n"); + } guestDef.append("\n"); if (iothreads) { guestDef.append(String.format("%s", NUMBER_OF_IOTHREADS)); @@ -580,7 +582,7 @@ public class LibvirtVMDef { } } - if (_emulator != null && _emulator.endsWith("aarch64")) { + if (_emulator != null && (_emulator.endsWith("aarch64") || _emulator.endsWith("s390x"))) { devicesBuilder.append("\n"); for (int i = 0; i < 32; i++) { devicesBuilder.append("\n"); @@ -678,7 +680,7 @@ public class LibvirtVMDef { } public enum DiskBus { - IDE("ide"), SCSI("scsi"), VIRTIO("virtio"), XEN("xen"), USB("usb"), UML("uml"), FDC("fdc"), SATA("sata"); + IDE("ide"), SCSI("scsi"), VIRTIO("virtio"), XEN("xen"), USB("usb"), UML("uml"), FDC("fdc"), SATA("sata"), VIRTIOBLK("virtio-blk"); String _bus; DiskBus(String bus) { @@ -1652,7 +1654,7 @@ public class LibvirtVMDef { if (_scriptPath != null) { netBuilder.append("