diff --git a/agent/src/main/java/com/cloud/agent/Agent.java b/agent/src/main/java/com/cloud/agent/Agent.java index e9213ca9b8c..b1ec592b9fe 100644 --- a/agent/src/main/java/com/cloud/agent/Agent.java +++ b/agent/src/main/java/com/cloud/agent/Agent.java @@ -40,6 +40,8 @@ import java.util.concurrent.atomic.AtomicInteger; import javax.naming.ConfigurationException; +import com.cloud.resource.AgentStatusUpdater; +import com.cloud.resource.ResourceStatusUpdater; import com.cloud.utils.NumbersUtil; import org.apache.cloudstack.agent.lb.SetupMSListAnswer; import org.apache.cloudstack.agent.lb.SetupMSListCommand; @@ -100,7 +102,7 @@ import com.cloud.utils.script.Script; * For more configuration options, see the individual types. * **/ -public class Agent implements HandlerFactory, IAgentControl { +public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater { protected static Logger s_logger = Logger.getLogger(Agent.class); public enum ExitStatus { @@ -409,6 +411,20 @@ public class Agent implements HandlerFactory, IAgentControl { } } + public void triggerUpdate() { + PingCommand command = _resource.getCurrentStatus(getId()); + command.setOutOfBand(true); + s_logger.debug("Sending out of band ping"); + + final Request request = new Request(_id, -1, command, false); + request.setSequence(getNextSequence()); + try { + _link.send(request.toBytes()); + } catch (final ClosedChannelException e) { + s_logger.warn("Unable to send ping update: " + request.toString()); + } + } + protected void cancelTasks() { synchronized (_watchList) { for (final WatchTask task : _watchList) { @@ -461,6 +477,10 @@ public class Agent implements HandlerFactory, IAgentControl { } catch (final ClosedChannelException e) { s_logger.warn("Unable to send request: " + request.toString()); } + + if (_resource instanceof ResourceStatusUpdater) { + ((ResourceStatusUpdater) _resource).registerStatusUpdater(this); + } } } diff --git a/api/src/main/java/com/cloud/user/Account.java b/api/src/main/java/com/cloud/user/Account.java index b4cdb882c4b..bb9838f137a 100644 --- a/api/src/main/java/com/cloud/user/Account.java +++ b/api/src/main/java/com/cloud/user/Account.java @@ -30,7 +30,7 @@ public interface Account extends ControlledEntity, InternalIdentity, Identity { * Account states. * */ enum State { - DISABLED, ENABLED, LOCKED; + DISABLED, ENABLED, LOCKED, REMOVED; /** * The toString method was overridden to maintain consistency in the DB, as the GenericDaoBase uses toString in the enum value to make the sql statements diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateCfgCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateCfgCmd.java index abe7e31cfa2..63dc51452f0 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateCfgCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateCfgCmd.java @@ -16,6 +16,7 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.config; +import com.cloud.utils.crypt.DBEncryptionUtil; import org.apache.cloudstack.acl.RoleService; import org.apache.cloudstack.api.response.DomainResponse; import org.apache.log4j.Logger; @@ -150,25 +151,50 @@ public class UpdateCfgCmd extends BaseCmd { if (cfg != null) { ConfigurationResponse response = _responseGenerator.createConfigurationResponse(cfg); response.setResponseName(getCommandName()); - if (getZoneId() != null) { - response.setScope("zone"); - } - if (getClusterId() != null) { - response.setScope("cluster"); - } - if (getStoragepoolId() != null) { - response.setScope("storagepool"); - } - if (getAccountId() != null) { - response.setScope("account"); - } - if (getDomainId() != null) { - response.setScope("domain"); - } - response.setValue(value); + response = setResponseScopes(response); + response = setResponseValue(response, cfg); this.setResponseObject(response); } else { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to update config"); } } + + /** + * Sets the configuration value in the response. If the configuration is in the `Hidden` or `Secure` categories, the value is encrypted before being set in the response. + * @param response to be set with the configuration `cfg` value + * @param cfg to be used in setting the response value + * @return the response with the configuration's value + */ + public ConfigurationResponse setResponseValue(ConfigurationResponse response, Configuration cfg) { + if (cfg.isEncrypted()) { + response.setValue(DBEncryptionUtil.encrypt(getValue())); + } else { + response.setValue(getValue()); + } + return response; + } + + /** + * Sets the scope for the Configuration response only if the field is not null. + * @param response to be updated + * @return the response updated with the scopes + */ + public ConfigurationResponse setResponseScopes(ConfigurationResponse response) { + if (getZoneId() != null) { + response.setScope("zone"); + } + if (getClusterId() != null) { + response.setScope("cluster"); + } + if (getStoragepoolId() != null) { + response.setScope("storagepool"); + } + if (getAccountId() != null) { + response.setScope("account"); + } + if (getDomainId() != null) { + response.setScope("domain"); + } + return response; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/SystemVmResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/SystemVmResponse.java index 69b9b4cad9c..31a8b731491 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/SystemVmResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/SystemVmResponse.java @@ -178,6 +178,14 @@ public class SystemVmResponse extends BaseResponseWithAnnotations { @Param(description = "true if vm contains XS/VMWare tools inorder to support dynamic scaling of VM cpu/memory.") private Boolean isDynamicallyScalable; + @SerializedName(ApiConstants.SERVICE_OFFERING_ID) + @Param(description = "the ID of the service offering of the system virtual machine.") + private String serviceOfferingId; + + @SerializedName("serviceofferingname") + @Param(description = "the name of the service offering of the system virtual machine.") + private String serviceOfferingName; + @Override public String getObjectId() { return this.getId(); @@ -466,4 +474,20 @@ public class SystemVmResponse extends BaseResponseWithAnnotations { public void setDynamicallyScalable(Boolean dynamicallyScalable) { isDynamicallyScalable = dynamicallyScalable; } + + public String getServiceOfferingId() { + 
return serviceOfferingId; + } + + public void setServiceOfferingId(String serviceOfferingId) { + this.serviceOfferingId = serviceOfferingId; + } + + public String getServiceOfferingName() { + return serviceOfferingName; + } + + public void setServiceOfferingName(String serviceOfferingName) { + this.serviceOfferingName = serviceOfferingName; + } } diff --git a/client/pom.xml b/client/pom.xml index 86e6724d3cc..e52f8b1935c 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -422,11 +422,6 @@ cloud-engine-components-api ${project.version} - - org.apache.cloudstack - cloud-engine-network - ${project.version} - org.apache.cloudstack cloud-engine-orchestration diff --git a/core/src/main/java/com/cloud/agent/api/PingCommand.java b/core/src/main/java/com/cloud/agent/api/PingCommand.java index 1d62c5d1359..4192fc2e747 100644 --- a/core/src/main/java/com/cloud/agent/api/PingCommand.java +++ b/core/src/main/java/com/cloud/agent/api/PingCommand.java @@ -24,6 +24,7 @@ import com.cloud.host.Host; public class PingCommand extends Command { Host.Type hostType; long hostId; + boolean outOfBand; protected PingCommand() { } @@ -33,6 +34,12 @@ public class PingCommand extends Command { hostId = id; } + public PingCommand(Host.Type type, long id, boolean oob) { + hostType = type; + hostId = id; + outOfBand = oob; + } + public Host.Type getHostType() { return hostType; } @@ -41,6 +48,10 @@ public class PingCommand extends Command { return hostId; } + public boolean getOutOfBand() { return outOfBand; } + + public void setOutOfBand(boolean oob) { this.outOfBand = oob; } + @Override public boolean executeInSequence() { return false; diff --git a/core/src/main/java/com/cloud/resource/AgentStatusUpdater.java b/core/src/main/java/com/cloud/resource/AgentStatusUpdater.java new file mode 100644 index 00000000000..63d5576c060 --- /dev/null +++ b/core/src/main/java/com/cloud/resource/AgentStatusUpdater.java @@ -0,0 +1,27 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.resource; + +/** + * AgentStatusUpdater is an agent with triggerable update functionality + */ +public interface AgentStatusUpdater { + /** + * Trigger the sending of an update (Ping). + */ + void triggerUpdate(); +} diff --git a/core/src/main/java/com/cloud/resource/ResourceStatusUpdater.java b/core/src/main/java/com/cloud/resource/ResourceStatusUpdater.java new file mode 100644 index 00000000000..df59e3a152e --- /dev/null +++ b/core/src/main/java/com/cloud/resource/ResourceStatusUpdater.java @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.resource; + +/** + * ResourceStatusUpdater is a resource that can trigger out of band status updates + */ +public interface ResourceStatusUpdater { + /** + * Register an AgentStatusUpdater to use for triggering out of band updates. + * + * @param updater The object to call triggerUpdate() on + */ + void registerStatusUpdater(AgentStatusUpdater updater); +} diff --git a/core/src/main/java/org/apache/cloudstack/agent/directdownload/CheckUrlCommand.java b/core/src/main/java/org/apache/cloudstack/agent/directdownload/CheckUrlCommand.java index e8618d54209..b1b76da8211 100644 --- a/core/src/main/java/org/apache/cloudstack/agent/directdownload/CheckUrlCommand.java +++ b/core/src/main/java/org/apache/cloudstack/agent/directdownload/CheckUrlCommand.java @@ -25,6 +25,9 @@ public class CheckUrlCommand extends Command { private String format; private String url; + private Integer connectTimeout; + private Integer connectionRequestTimeout; + private Integer socketTimeout; public String getFormat() { return format; @@ -34,12 +37,27 @@ public class CheckUrlCommand extends Command { return url; } + public Integer getConnectTimeout() { return connectTimeout; } + + public Integer getConnectionRequestTimeout() { return connectionRequestTimeout; } + + public Integer getSocketTimeout() { return socketTimeout; } + public CheckUrlCommand(final String format,final String url) { super(); this.format = format; this.url = url; } + public CheckUrlCommand(final String format,final String url, Integer connectTimeout, Integer connectionRequestTimeout, Integer socketTimeout) { + super(); + this.format = format; + this.url = url; + this.connectTimeout = connectTimeout; + this.socketTimeout = socketTimeout; + this.connectionRequestTimeout = connectionRequestTimeout; + } + @Override public boolean executeInSequence() { return false; diff --git a/core/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadHelper.java b/core/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadHelper.java index 80509b19b1b..27e35b7074b 100644 --- a/core/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadHelper.java +++ b/core/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadHelper.java @@ -54,7 +54,7 @@ public class DirectDownloadHelper { public static boolean checkUrlExistence(String url) { try { - DirectTemplateDownloader checker = getCheckerDownloader(url); + DirectTemplateDownloader checker = getCheckerDownloader(url, null, null, null); return checker.checkUrl(url); } catch (CloudRuntimeException e) { LOGGER.error(String.format("Cannot check URL %s is reachable due to: %s", url, e.getMessage()), e); @@ -62,22 +62,37 @@ public class DirectDownloadHelper { } } - private static DirectTemplateDownloader getCheckerDownloader(String url) { + public static boolean checkUrlExistence(String url, Integer connectTimeout, Integer connectionRequestTimeout, Integer socketTimeout) { + try { + 
DirectTemplateDownloader checker = getCheckerDownloader(url, connectTimeout, connectionRequestTimeout, socketTimeout); + return checker.checkUrl(url); + } catch (CloudRuntimeException e) { + LOGGER.error(String.format("Cannot check URL %s is reachable due to: %s", url, e.getMessage()), e); + return false; + } + } + + private static DirectTemplateDownloader getCheckerDownloader(String url, Integer connectTimeout, Integer connectionRequestTimeout, Integer socketTimeout) { if (url.toLowerCase().startsWith("https:")) { - return new HttpsDirectTemplateDownloader(url); + return new HttpsDirectTemplateDownloader(url, connectTimeout, connectionRequestTimeout, socketTimeout); } else if (url.toLowerCase().startsWith("http:")) { - return new HttpDirectTemplateDownloader(url); + return new HttpDirectTemplateDownloader(url, connectTimeout, socketTimeout); } else if (url.toLowerCase().startsWith("nfs:")) { return new NfsDirectTemplateDownloader(url); } else if (url.toLowerCase().endsWith(".metalink")) { - return new MetalinkDirectTemplateDownloader(url); + return new MetalinkDirectTemplateDownloader(url, connectTimeout, socketTimeout); } else { throw new CloudRuntimeException(String.format("Cannot find a download checker for url: %s", url)); } } public static Long getFileSize(String url, String format) { - DirectTemplateDownloader checker = getCheckerDownloader(url); + DirectTemplateDownloader checker = getCheckerDownloader(url, null, null, null); + return checker.getRemoteFileSize(url, format); + } + + public static Long getFileSize(String url, String format, Integer connectTimeout, Integer connectionRequestTimeout, Integer socketTimeout) { + DirectTemplateDownloader checker = getCheckerDownloader(url, connectTimeout, connectionRequestTimeout, socketTimeout); return checker.getRemoteFileSize(url, format); } } diff --git a/core/src/main/java/org/apache/cloudstack/direct/download/HttpDirectTemplateDownloader.java b/core/src/main/java/org/apache/cloudstack/direct/download/HttpDirectTemplateDownloader.java index 093f0604a44..e1b2f1fe429 100644 --- a/core/src/main/java/org/apache/cloudstack/direct/download/HttpDirectTemplateDownloader.java +++ b/core/src/main/java/org/apache/cloudstack/direct/download/HttpDirectTemplateDownloader.java @@ -50,8 +50,8 @@ public class HttpDirectTemplateDownloader extends DirectTemplateDownloaderImpl { protected GetMethod request; protected Map reqHeaders = new HashMap<>(); - protected HttpDirectTemplateDownloader(String url) { - this(url, null, null, null, null, null, null, null); + protected HttpDirectTemplateDownloader(String url, Integer connectTimeout, Integer socketTimeout) { + this(url, null, null, null, null, connectTimeout, socketTimeout, null); } public HttpDirectTemplateDownloader(String url, Long templateId, String destPoolPath, String checksum, diff --git a/core/src/main/java/org/apache/cloudstack/direct/download/HttpsDirectTemplateDownloader.java b/core/src/main/java/org/apache/cloudstack/direct/download/HttpsDirectTemplateDownloader.java index 2035aab2aca..1bee45c477d 100644 --- a/core/src/main/java/org/apache/cloudstack/direct/download/HttpsDirectTemplateDownloader.java +++ b/core/src/main/java/org/apache/cloudstack/direct/download/HttpsDirectTemplateDownloader.java @@ -65,8 +65,8 @@ public class HttpsDirectTemplateDownloader extends DirectTemplateDownloaderImpl protected CloseableHttpClient httpsClient; private HttpUriRequest req; - protected HttpsDirectTemplateDownloader(String url) { - this(url, null, null, null, null, null, null, null, null); + protected 
HttpsDirectTemplateDownloader(String url, Integer connectTimeout, Integer connectionRequestTimeout, Integer socketTimeout) { + this(url, null, null, null, null, connectTimeout, socketTimeout, connectionRequestTimeout, null); } public HttpsDirectTemplateDownloader(String url, Long templateId, String destPoolPath, String checksum, Map headers, diff --git a/core/src/main/java/org/apache/cloudstack/direct/download/MetalinkDirectTemplateDownloader.java b/core/src/main/java/org/apache/cloudstack/direct/download/MetalinkDirectTemplateDownloader.java index 83802064cdf..06578d8c2b2 100644 --- a/core/src/main/java/org/apache/cloudstack/direct/download/MetalinkDirectTemplateDownloader.java +++ b/core/src/main/java/org/apache/cloudstack/direct/download/MetalinkDirectTemplateDownloader.java @@ -60,8 +60,8 @@ public class MetalinkDirectTemplateDownloader extends DirectTemplateDownloaderIm } } - protected MetalinkDirectTemplateDownloader(String url) { - this(url, null, null, null, null, null, null, null); + protected MetalinkDirectTemplateDownloader(String url, Integer connectTimeout, Integer socketTimeout) { + this(url, null, null, null, null, connectTimeout, socketTimeout, null); } public MetalinkDirectTemplateDownloader(String url, String destPoolPath, Long templateId, String checksum, diff --git a/core/src/test/java/org/apache/cloudstack/direct/download/BaseDirectTemplateDownloaderTest.java b/core/src/test/java/org/apache/cloudstack/direct/download/BaseDirectTemplateDownloaderTest.java index e4f1d8f4412..2c7245662a2 100644 --- a/core/src/test/java/org/apache/cloudstack/direct/download/BaseDirectTemplateDownloaderTest.java +++ b/core/src/test/java/org/apache/cloudstack/direct/download/BaseDirectTemplateDownloaderTest.java @@ -56,7 +56,7 @@ public class BaseDirectTemplateDownloaderTest { private HttpEntity httpEntity; @InjectMocks - protected HttpsDirectTemplateDownloader httpsDownloader = new HttpsDirectTemplateDownloader(httpUrl); + protected HttpsDirectTemplateDownloader httpsDownloader = new HttpsDirectTemplateDownloader(httpUrl, 1000, 1000, 1000); @Before public void init() throws IOException { diff --git a/core/src/test/java/org/apache/cloudstack/direct/download/MetalinkDirectTemplateDownloaderTest.java b/core/src/test/java/org/apache/cloudstack/direct/download/MetalinkDirectTemplateDownloaderTest.java index 9c6400bcdf4..68982fb915f 100644 --- a/core/src/test/java/org/apache/cloudstack/direct/download/MetalinkDirectTemplateDownloaderTest.java +++ b/core/src/test/java/org/apache/cloudstack/direct/download/MetalinkDirectTemplateDownloaderTest.java @@ -25,7 +25,8 @@ import org.mockito.InjectMocks; public class MetalinkDirectTemplateDownloaderTest extends BaseDirectTemplateDownloaderTest { @InjectMocks - protected MetalinkDirectTemplateDownloader metalinkDownloader = new MetalinkDirectTemplateDownloader(httpsUrl); + protected MetalinkDirectTemplateDownloader metalinkDownloader = new MetalinkDirectTemplateDownloader(httpsUrl, 1000, 1000); + @Test public void testCheckUrlMetalink() { metalinkDownloader.downloader = httpsDownloader; diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java index 2666cfadc70..15f5b231be2 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java +++ 
b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java @@ -79,7 +79,7 @@ public interface VolumeOrchestrationService { Long.class, "storage.max.volume.size", "2000", - "The maximum size for a volume (in GB).", + "The maximum size for a volume (in GiB).", true); VolumeInfo moveVolume(VolumeInfo volume, long destPoolDcId, Long destPoolPodId, Long destPoolClusterId, HypervisorType dataDiskHyperType) diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/service/api/DirectoryService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/service/api/DirectoryService.java deleted file mode 100644 index e507d9964a1..00000000000 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/service/api/DirectoryService.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.engine.service.api; - -import java.net.URI; -import java.util.List; - -import com.cloud.utils.component.PluggableService; - -public interface DirectoryService { - void registerService(String serviceName, URI endpoint); - - void unregisterService(String serviceName, URI endpoint); - - List getEndPoints(String serviceName); - - URI getLoadBalancedEndPoint(String serviceName); - - List listServices(); - -} diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/service/api/EntityService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/service/api/EntityService.java deleted file mode 100644 index fec0b9964af..00000000000 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/service/api/EntityService.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.cloudstack.engine.service.api; - -import java.util.List; - -import javax.ws.rs.Path; - -import com.cloud.network.Network; -import com.cloud.storage.Volume; -import com.cloud.vm.VirtualMachine; - -/** - * Service to retrieve CloudStack entities - * very likely to change - */ -@Path("resources") -public interface EntityService { - List listVirtualMachines(); - - List listVolumes(); - - List listNetworks(); - - List listNics(); - - List listSnapshots(); - - List listTemplates(); - - List listStoragePools(); - - List listHosts(); - - VirtualMachine getVirtualMachine(String vm); - - Volume getVolume(String volume); - - Network getNetwork(String network); - -} diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/service/api/OperationsServices.java b/engine/api/src/main/java/org/apache/cloudstack/engine/service/api/OperationsServices.java deleted file mode 100644 index 7b7abe83270..00000000000 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/service/api/OperationsServices.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.cloudstack.engine.service.api; - -import java.net.URL; -import java.util.List; - -import com.cloud.alert.Alert; - -public interface OperationsServices { -// List listJobs(); -// -// List listJobsInProgress(); -// -// List listJobsCompleted(); -// -// List listJobsCompleted(Long from); -// -// List listJobsInWaiting(); - - void cancelJob(String job); - - List listAlerts(); - - Alert getAlert(String uuid); - - void cancelAlert(String alert); - - void registerForAlerts(); - - String registerForEventNotifications(String type, String topic, URL url); - - boolean deregisterForEventNotifications(String notificationId); - - /** - * @return the list of event topics someone can register for - */ - List listEventTopics(); - -} diff --git a/engine/network/pom.xml b/engine/network/pom.xml deleted file mode 100644 index f0a3cd94748..00000000000 --- a/engine/network/pom.xml +++ /dev/null @@ -1,47 +0,0 @@ - - - 4.0.0 - cloud-engine-network - Apache CloudStack Cloud Engine API - - org.apache.cloudstack - cloud-engine - 4.19.0.0-SNAPSHOT - ../pom.xml - - - - org.apache.cloudstack - cloud-engine-api - ${project.version} - - - org.apache.cloudstack - cloud-engine-components-api - ${project.version} - - - org.apache.cloudstack - cloud-framework-ipc - ${project.version} - - - diff --git a/engine/network/src/main/java/org/apache/cloudstack/network/NetworkOrchestrator.java b/engine/network/src/main/java/org/apache/cloudstack/network/NetworkOrchestrator.java deleted file mode 100644 index 8b6b6e431d3..00000000000 --- a/engine/network/src/main/java/org/apache/cloudstack/network/NetworkOrchestrator.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.cloudstack.network; - -public interface NetworkOrchestrator { - - /** - * Prepares for a VM to join a network - * @param vm vm - * @param reservationId reservation id - */ - void prepare(String vm, String reservationId); - - /** - * Release all reservation - */ - void release(String vm, String reservationId); - - /** - * Cancel a previous reservation - * @param reservationId - */ - void cancel(String reservationId); -} diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 9a100222894..36ec427735d 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -3760,7 +3760,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (cmd instanceof PingRoutingCommand) { final PingRoutingCommand ping = (PingRoutingCommand)cmd; if (ping.getHostVmStateReport() != null) { - _syncMgr.processHostVmStatePingReport(agentId, ping.getHostVmStateReport()); + _syncMgr.processHostVmStatePingReport(agentId, ping.getHostVmStateReport(), ping.getOutOfBand()); } scanStalledVMInTransitionStateOnUpHost(agentId); diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSync.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSync.java index 152d0d889c6..b2a48a026a3 100644 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSync.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSync.java @@ -27,7 +27,7 @@ public interface VirtualMachinePowerStateSync { void processHostVmStateReport(long hostId, Map report); // to adapt legacy ping report - void processHostVmStatePingReport(long hostId, Map report); + void processHostVmStatePingReport(long hostId, Map report, boolean force); Map convertVmStateReport(Map states); } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java index 815206a33bf..3eb3569cab0 100644 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java @@ -55,19 +55,19 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat s_logger.debug("Process host VM state report. host: " + hostId); Map translatedInfo = convertVmStateReport(report); - processReport(hostId, translatedInfo); + processReport(hostId, translatedInfo, false); } @Override - public void processHostVmStatePingReport(long hostId, Map report) { + public void processHostVmStatePingReport(long hostId, Map report, boolean force) { if (s_logger.isDebugEnabled()) s_logger.debug("Process host VM state report from ping process. host: " + hostId); Map translatedInfo = convertVmStateReport(report); - processReport(hostId, translatedInfo); + processReport(hostId, translatedInfo, force); } - private void processReport(long hostId, Map translatedInfo) { + private void processReport(long hostId, Map translatedInfo, boolean force) { if (s_logger.isDebugEnabled()) { s_logger.debug("Process VM state report. 
host: " + hostId + ", number of records in report: " + translatedInfo.size()); @@ -117,7 +117,7 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat // Make sure powerState is up to date for missing VMs try { - if (!_instanceDao.isPowerStateUpToDate(instance.getId())) { + if (!force && !_instanceDao.isPowerStateUpToDate(instance.getId())) { s_logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM id: " + instance.getId()); _instanceDao.resetVmPowerStateTracking(instance.getId()); continue; @@ -150,7 +150,7 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat long milliSecondsSinceLastStateUpdate = currentTime.getTime() - vmStateUpdateTime.getTime(); - if (milliSecondsSinceLastStateUpdate > milliSecondsGracefullPeriod) { + if (force || milliSecondsSinceLastStateUpdate > milliSecondsGracefullPeriod) { s_logger.debug("vm id: " + instance.getId() + " - time since last state update(" + milliSecondsSinceLastStateUpdate + "ms) has passed graceful period"); // this is were a race condition might have happened if we don't re-fetch the instance; diff --git a/engine/pom.xml b/engine/pom.xml index 91f430ba300..b2f41834403 100644 --- a/engine/pom.xml +++ b/engine/pom.xml @@ -47,7 +47,6 @@ api components-api - network orchestration schema service diff --git a/engine/schema/src/main/java/com/cloud/domain/DomainDetailVO.java b/engine/schema/src/main/java/com/cloud/domain/DomainDetailVO.java index 61eb6cfd28e..df5a2283baa 100644 --- a/engine/schema/src/main/java/com/cloud/domain/DomainDetailVO.java +++ b/engine/schema/src/main/java/com/cloud/domain/DomainDetailVO.java @@ -23,7 +23,6 @@ import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.Table; -import com.cloud.utils.db.Encrypt; import org.apache.cloudstack.api.InternalIdentity; @Entity @@ -40,7 +39,6 @@ public class DomainDetailVO implements InternalIdentity { @Column(name = "name") private String name; - @Encrypt @Column(name = "value") private String value; diff --git a/engine/schema/src/main/java/com/cloud/projects/dao/ProjectDaoImpl.java b/engine/schema/src/main/java/com/cloud/projects/dao/ProjectDaoImpl.java index 560f8dc79fd..5deb8586120 100644 --- a/engine/schema/src/main/java/com/cloud/projects/dao/ProjectDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/projects/dao/ProjectDaoImpl.java @@ -33,7 +33,6 @@ import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; -import com.cloud.utils.db.TransactionLegacy; @Component public class ProjectDaoImpl extends GenericDaoBase implements ProjectDao { @@ -71,22 +70,8 @@ public class ProjectDaoImpl extends GenericDaoBase implements P @Override @DB public boolean remove(Long projectId) { - boolean result = false; - TransactionLegacy txn = TransactionLegacy.currentTxn(); - txn.start(); - ProjectVO projectToRemove = findById(projectId); - projectToRemove.setName(null); - if (!update(projectId, projectToRemove)) { - s_logger.warn("Failed to reset name for the project id=" + projectId + " as a part of project remove"); - return false; - } - _tagsDao.removeByIdAndType(projectId, ResourceObjectType.Project); - result = super.remove(projectId); - txn.commit(); - - return result; - + return super.remove(projectId); } @Override diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41810to41900.java 
b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41810to41900.java index 57f87d1f2cc..fd44e79e7cf 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41810to41900.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41810to41900.java @@ -17,15 +17,19 @@ package com.cloud.upgrade.dao; import com.cloud.upgrade.SystemVmTemplateRegistration; +import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.DateUtil; import com.cloud.utils.exception.CloudRuntimeException; import org.apache.log4j.Logger; +import org.jasypt.exceptions.EncryptionOperationNotPossibleException; import java.io.InputStream; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; +import java.util.HashMap; +import java.util.Map; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.Date; @@ -34,6 +38,10 @@ public class Upgrade41810to41900 implements DbUpgrade, DbUpgradeSystemVmTemplate final static Logger LOG = Logger.getLogger(Upgrade41810to41900.class); private SystemVmTemplateRegistration systemVmTemplateRegistration; + private static final String ACCOUNT_DETAILS = "account_details"; + + private static final String DOMAIN_DETAILS = "domain_details"; + private final SimpleDateFormat[] formats = { new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"), new SimpleDateFormat("MM/dd/yyyy HH:mm:ss"), new SimpleDateFormat("dd/MM/yyyy HH:mm:ss"), new SimpleDateFormat("EEE MMM dd HH:mm:ss z yyyy")}; @@ -66,6 +74,7 @@ public class Upgrade41810to41900 implements DbUpgrade, DbUpgradeSystemVmTemplate @Override public void performDataMigration(Connection conn) { + decryptConfigurationValuesFromAccountAndDomainScopesNotInSecureHiddenCategories(conn); migrateBackupDates(conn); } @@ -95,6 +104,37 @@ public class Upgrade41810to41900 implements DbUpgrade, DbUpgradeSystemVmTemplate } } + protected void decryptConfigurationValuesFromAccountAndDomainScopesNotInSecureHiddenCategories(Connection conn) { + LOG.info("Decrypting global configuration values from the following tables: account_details and domain_details."); + + Map accountsMap = getConfigsWithScope(conn, ACCOUNT_DETAILS); + updateConfigValuesWithScope(conn, accountsMap, ACCOUNT_DETAILS); + LOG.info("Successfully decrypted configurations from account_details table."); + + Map domainsMap = getConfigsWithScope(conn, DOMAIN_DETAILS); + updateConfigValuesWithScope(conn, domainsMap, DOMAIN_DETAILS); + LOG.info("Successfully decrypted configurations from domain_details table."); + } + + protected Map getConfigsWithScope(Connection conn, String table) { + Map configsToBeUpdated = new HashMap<>(); + String selectDetails = String.format("SELECT details.id, details.value from cloud.%s details, cloud.configuration c " + + "WHERE details.name = c.name AND c.category NOT IN ('Hidden', 'Secure') AND details.value <> \"\" ORDER BY details.id;", table); + + try (PreparedStatement pstmt = conn.prepareStatement(selectDetails)) { + try (ResultSet result = pstmt.executeQuery()) { + while (result.next()) { + configsToBeUpdated.put(result.getLong("id"), result.getString("value")); + } + } + return configsToBeUpdated; + } catch (SQLException e) { + String message = String.format("Unable to retrieve data from table [%s] due to [%s].", table, e.getMessage()); + LOG.error(message, e); + throw new CloudRuntimeException(message, e); + } + } + public void migrateBackupDates(Connection conn) { LOG.info("Trying to convert backups' date column from varchar(255) to datetime type."); 
@@ -125,6 +165,27 @@ public class Upgrade41810to41900 implements DbUpgrade, DbUpgradeSystemVmTemplate } } + protected void updateConfigValuesWithScope(Connection conn, Map configsToBeUpdated, String table) { + String updateConfigValues = String.format("UPDATE cloud.%s SET value = ? WHERE id = ?;", table); + + for (Map.Entry config : configsToBeUpdated.entrySet()) { + try (PreparedStatement pstmt = conn.prepareStatement(updateConfigValues)) { + String decryptedValue = DBEncryptionUtil.decrypt(config.getValue()); + + pstmt.setString(1, decryptedValue); + pstmt.setLong(2, config.getKey()); + + LOG.info(String.format("Updating config with ID [%s] to value [%s].", config.getKey(), decryptedValue)); + pstmt.executeUpdate(); + } catch (SQLException | EncryptionOperationNotPossibleException e) { + String message = String.format("Unable to update config value with ID [%s] on table [%s] due to [%s]. The config value may already be decrypted.", + config.getKey(), table, e); + LOG.error(message); + throw new CloudRuntimeException(message, e); + } + } + } + private void fetchDatesAndMigrateToNewColumn(Connection conn) { String selectBackupDates = "SELECT `id`, `old_date` FROM `cloud`.`backups` WHERE 1;"; String date; diff --git a/engine/schema/src/main/java/com/cloud/user/AccountDetailVO.java b/engine/schema/src/main/java/com/cloud/user/AccountDetailVO.java index 71ad765e618..863f6c96008 100644 --- a/engine/schema/src/main/java/com/cloud/user/AccountDetailVO.java +++ b/engine/schema/src/main/java/com/cloud/user/AccountDetailVO.java @@ -25,8 +25,6 @@ import javax.persistence.Table; import org.apache.cloudstack.api.InternalIdentity; -import com.cloud.utils.db.Encrypt; - @Entity @Table(name = "account_details") public class AccountDetailVO implements InternalIdentity { @@ -41,7 +39,6 @@ public class AccountDetailVO implements InternalIdentity { @Column(name = "name") private String name; - @Encrypt @Column(name = "value", length=4096) private String value; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-301to302.sql b/engine/schema/src/main/resources/META-INF/db/schema-301to302.sql index f33fcb436d8..4532757d052 100755 --- a/engine/schema/src/main/resources/META-INF/db/schema-301to302.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-301to302.sql @@ -51,7 +51,7 @@ INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'manag INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'secstorage.capacity.standby', '10', 'The minimal number of command execution sessions that system is able to serve immediately(standby capacity)'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'secstorage.cmd.execution.time.max', '30', 'The max command execution time in minute'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'secstorage.session.max', '50', 'The max number of command execution sessions that a SSVM can handle'); -INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Storage', 'DEFAULT', 'management-server', 'storage.max.volume.size', '2000', 'The maximum size for a volume (in GB).'); +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Storage', 'DEFAULT', 'management-server', 'storage.max.volume.size', '2000', 'The maximum size for a volume (in GiB).'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'task.cleanup.retry.interval', '600', 'Time (in seconds) to wait before retrying 
cleanup of tasks if the cleanup failed previously. 0 means to never retry.'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'vmware.additional.vnc.portrange.start', '50000', 'Start port number of additional VNC port range'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'vmware.percluster.host.max', '8', 'maxmium hosts per vCenter cluster(do not let it grow over 8)'); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41810to41900.sql b/engine/schema/src/main/resources/META-INF/db/schema-41810to41900.sql index 100457ad590..a429c37d4d6 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41810to41900.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41810to41900.sql @@ -282,3 +282,6 @@ FROM `cloud`.`network_offering_details` AS `offering_details` ON `offering_details`.`network_offering_id` = `network_offerings`.`id` AND `offering_details`.`name`='internetProtocol' GROUP BY `network_offerings`.`id`; + +-- Set removed state for all removed accounts +UPDATE `cloud`.`account` SET state='removed' WHERE `removed` IS NOT NULL; diff --git a/engine/service/pom.xml b/engine/service/pom.xml index 64453d31713..8519fe9c0d1 100644 --- a/engine/service/pom.xml +++ b/engine/service/pom.xml @@ -54,11 +54,6 @@ cloud-engine-storage ${project.version} - - org.apache.cloudstack - cloud-engine-network - ${project.version} - org.springframework spring-context diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java index 2966f682048..f2b0f17232b 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java @@ -263,13 +263,9 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement } protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, DeploymentPlan plan) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking if storage pool is suitable, name: " + pool.getName() + " ,poolId: " + pool.getId()); - } + s_logger.debug(String.format("Checking if storage pool [%s] is suitable to disk [%s].", pool, dskCh)); if (avoid.shouldAvoid(pool)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("StoragePool is in avoid set, skipping this pool"); - } + s_logger.debug(String.format("StoragePool [%s] is in avoid set, skipping this pool to allocation of disk [%s].", pool, dskCh)); return false; } @@ -297,6 +293,8 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement } if (!checkDiskProvisioningSupport(dskCh, pool)) { + s_logger.debug(String.format("Storage pool [%s] does not have support to disk provisioning of disk [%s].", pool, ReflectionToStringBuilderUtils.reflectOnlySelectedFields(dskCh, + "type", "name", "diskOfferingId", "templateId", "volumeId", "provisioningType", "hyperType"))); return false; } @@ -306,10 +304,12 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement Volume volume = volumeDao.findById(dskCh.getVolumeId()); if(!storageMgr.storagePoolCompatibleWithVolumePool(pool, volume)) { + s_logger.debug(String.format("Pool [%s] is not compatible with volume [%s], skipping it.", pool, volume)); return false; } if (pool.isManaged() && 
!storageUtil.managedStoragePoolCanScale(pool, plan.getClusterId(), plan.getHostId())) { + s_logger.debug(String.format("Cannot allocate pool [%s] to volume [%s] because the max number of managed clustered filesystems has been exceeded.", pool, volume)); return false; } @@ -317,14 +317,14 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement List> requestVolumeDiskProfilePairs = new ArrayList<>(); requestVolumeDiskProfilePairs.add(new Pair<>(volume, dskCh)); if (dskCh.getHypervisorType() == HypervisorType.VMware) { - // Skip the parent datastore cluster, consider only child storage pools in it if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster && storageMgr.isStoragePoolDatastoreClusterParent(pool)) { + s_logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is a parent datastore cluster.", pool, volume)); return false; } - // Skip the storage pool whose parent datastore cluster is not in UP state. if (pool.getParent() != 0L) { StoragePoolVO datastoreCluster = storagePoolDao.findById(pool.getParent()); if (datastoreCluster == null || (datastoreCluster != null && datastoreCluster.getStatus() != StoragePoolStatus.Up)) { + s_logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is not in [%s] state.", datastoreCluster, volume, StoragePoolStatus.Up)); return false; } } @@ -332,6 +332,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement try { boolean isStoragePoolStoragepolicyComplaince = storageMgr.isStoragePoolCompliantWithStoragePolicy(requestVolumeDiskProfilePairs, pool); if (!isStoragePoolStoragepolicyComplaince) { + s_logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is not compliant with the storage policy required by the volume.", pool, volume)); return false; } } catch (StorageUnavailableException e) { diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java index e7c9b7e6f3d..fe49504fda1 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java @@ -100,9 +100,10 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat } StoragePool storagePool = (StoragePool)dataStoreMgr.getPrimaryDataStore(pool.getId()); if (filter(avoid, storagePool, dskCh, plan)) { - s_logger.trace(String.format("Found suitable local storage pool [%s], adding to list.", pool)); + s_logger.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", pool, dskCh)); suitablePools.add(storagePool); } else { + s_logger.debug(String.format("Adding storage pool [%s] to avoid set during allocation of disk [%s].", pool, dskCh)); avoid.addPool(pool.getId()); } } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java index 4fbaa8c0e83..774c2229a09 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java +++ 
b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java @@ -82,9 +82,10 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator { if (pool != null && pool.isLocal()) { StoragePool storagePool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId()); if (filter(avoid, storagePool, dskCh, plan)) { - s_logger.trace(String.format("Found suitable local storage pool [%s], adding to list.", pool)); + s_logger.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", pool, dskCh)); suitablePools.add(storagePool); } else { + s_logger.debug(String.format("Adding storage pool [%s] to avoid set during allocation of disk [%s].", pool, dskCh)); avoid.addPool(pool.getId()); } } @@ -107,8 +108,10 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator { } StoragePool storagePool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId()); if (filter(avoid, storagePool, dskCh, plan)) { + s_logger.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", pool, dskCh)); suitablePools.add(storagePool); } else { + s_logger.debug(String.format("Adding storage pool [%s] to avoid set during allocation of disk [%s].", pool, dskCh)); avoid.addPool(pool.getId()); } } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java index 2902871a9eb..1b3835560df 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java @@ -94,10 +94,11 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator { } StoragePool storagePool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(storage.getId()); if (filter(avoid, storagePool, dskCh, plan)) { - LOGGER.trace(String.format("Found suitable local storage pool [%s], adding to list.", storage)); + LOGGER.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", storagePool, dskCh)); suitablePools.add(storagePool); } else { if (canAddStoragePoolToAvoidSet(storage)) { + LOGGER.debug(String.format("Adding storage pool [%s] to avoid set during allocation of disk [%s].", storagePool, dskCh)); avoid.addPool(storagePool.getId()); } } diff --git a/engine/userdata/pom.xml b/engine/userdata/pom.xml index 75475b2af18..e487af4f22d 100644 --- a/engine/userdata/pom.xml +++ b/engine/userdata/pom.xml @@ -46,8 +46,7 @@ org.apache.cloudstack cloud-engine-components-api - 4.19.0.0-SNAPSHOT - compile + ${project.version} diff --git a/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigurationVO.java b/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigurationVO.java index 08ec9bfe83f..c705cc64072 100644 --- a/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigurationVO.java +++ b/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigurationVO.java @@ -170,7 +170,7 @@ public class ConfigurationVO implements Configuration { @Override public boolean isEncrypted() { - return "Hidden".equals(getCategory()) || "Secure".equals(getCategory()); + return StringUtils.equalsAny(getCategory(), "Hidden", 
"Secure"); } @Override diff --git a/framework/db/src/main/java/com/cloud/utils/crypt/EncryptionSecretKeyChanger.java b/framework/db/src/main/java/com/cloud/utils/crypt/EncryptionSecretKeyChanger.java index 88830b3e3f9..961c537d0da 100644 --- a/framework/db/src/main/java/com/cloud/utils/crypt/EncryptionSecretKeyChanger.java +++ b/framework/db/src/main/java/com/cloud/utils/crypt/EncryptionSecretKeyChanger.java @@ -475,6 +475,8 @@ public class EncryptionSecretKeyChanger { // migrate resource details values migrateHostDetails(conn); + migrateEncryptedAccountDetails(conn); + migrateEncryptedDomainDetails(conn); migrateClusterDetails(conn); migrateImageStoreDetails(conn); migrateStoragePoolDetails(conn); @@ -497,6 +499,30 @@ public class EncryptionSecretKeyChanger { return true; } + private void migrateEncryptedAccountDetails(Connection conn) { + System.out.println("Beginning migration of account_details encrypted values"); + + String tableName = "account_details"; + String selectSql = "SELECT details.id, details.value from account_details details, cloud.configuration c " + + "WHERE details.name = c.name AND c.category IN ('Hidden', 'Secure') AND details.value <> \"\" ORDER BY details.id;"; + String updateSql = "UPDATE cloud.account_details SET value = ? WHERE id = ?;"; + migrateValueAndUpdateDatabaseById(conn, tableName, selectSql, updateSql, false); + + System.out.println("End migration of account details values"); + } + + private void migrateEncryptedDomainDetails(Connection conn) { + System.out.println("Beginning migration of domain_details encrypted values"); + + String tableName = "domain_details"; + String selectSql = "SELECT details.id, details.value from domain_details details, cloud.configuration c " + + "WHERE details.name = c.name AND c.category IN ('Hidden', 'Secure') AND details.value <> \"\" ORDER BY details.id;"; + String updateSql = "UPDATE cloud.domain_details SET value = ? 
WHERE id = ?;"; + migrateValueAndUpdateDatabaseById(conn, tableName, selectSql, updateSql, false); + + System.out.println("End migration of domain details values"); + } + protected String migrateValue(String value) { if (StringUtils.isEmpty(value)) { return value; diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index d5c0569ca9a..16cccb0150b 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -83,6 +83,7 @@ import org.libvirt.DomainInfo; import org.libvirt.DomainInfo.DomainState; import org.libvirt.DomainInterfaceStats; import org.libvirt.DomainSnapshot; +import org.libvirt.Library; import org.libvirt.LibvirtException; import org.libvirt.MemoryStatistic; import org.libvirt.Network; @@ -90,6 +91,9 @@ import org.libvirt.SchedParameter; import org.libvirt.SchedUlongParameter; import org.libvirt.Secret; import org.libvirt.VcpuInfo; +import org.libvirt.event.DomainEvent; +import org.libvirt.event.DomainEventDetail; +import org.libvirt.event.StoppedDetail; import org.w3c.dom.Document; import org.w3c.dom.Element; import org.w3c.dom.Node; @@ -97,6 +101,7 @@ import org.w3c.dom.NodeList; import org.xml.sax.InputSource; import org.xml.sax.SAXException; + import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; import com.cloud.agent.api.HostVmStateReportEntry; @@ -175,6 +180,8 @@ import com.cloud.network.Networks.BroadcastDomainType; import com.cloud.network.Networks.IsolationType; import com.cloud.network.Networks.RouterPrivateIpStrategy; import com.cloud.network.Networks.TrafficType; +import com.cloud.resource.AgentStatusUpdater; +import com.cloud.resource.ResourceStatusUpdater; import com.cloud.resource.RequestWrapper; import com.cloud.resource.ServerResource; import com.cloud.resource.ServerResourceBase; @@ -224,11 +231,12 @@ import com.google.gson.Gson; * private mac addresses for domrs | mac address | start + 126 || || * pool | the parent of the storage pool hierarchy * } **/ -public class LibvirtComputingResource extends ServerResourceBase implements ServerResource, VirtualRouterDeployer { +public class LibvirtComputingResource extends ServerResourceBase implements ServerResource, VirtualRouterDeployer, ResourceStatusUpdater { protected static Logger s_logger = Logger.getLogger(LibvirtComputingResource.class); private static final String CONFIG_VALUES_SEPARATOR = ","; + private static final String LEGACY = "legacy"; private static final String SECURE = "secure"; @@ -457,6 +465,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv protected CPUStat cpuStat = new CPUStat(); protected MemStat memStat = new MemStat(dom0MinMem, dom0OvercommitMem); private final LibvirtUtilitiesHelper libvirtUtilitiesHelper = new LibvirtUtilitiesHelper(); + private AgentStatusUpdater _agentStatusUpdater; protected Boolean enableManuallySettingCpuTopologyOnKvmVm = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.ENABLE_MANUALLY_SETTING_CPU_TOPOLOGY_ON_KVM_VM); @@ -481,6 +490,11 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv return hypervisorQemuVersion; } + @Override + public void registerStatusUpdater(AgentStatusUpdater updater) { + _agentStatusUpdater = updater; + } + @Override public 
ExecutionResult executeInVR(final String routerIp, final String script, final String args) { return executeInVR(routerIp, script, args, timeout); @@ -3590,9 +3604,63 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } catch (final CloudRuntimeException e) { s_logger.debug("Unable to initialize local storage pool: " + e); } + setupLibvirtEventListener(); return sscmd; } + private void setupLibvirtEventListener() { + final Thread libvirtListenerThread = new Thread(() -> { + try { + Library.runEventLoop(); + } catch (LibvirtException e) { + s_logger.error("LibvirtException was thrown in event loop: ", e); + } catch (InterruptedException e) { + s_logger.error("Libvirt event loop was interrupted: ", e); + } + }); + + try { + libvirtListenerThread.setDaemon(true); + libvirtListenerThread.start(); + + Connect conn = LibvirtConnection.getConnection(); + conn.addLifecycleListener(this::onDomainLifecycleChange); + + s_logger.debug("Set up the libvirt domain event lifecycle listener"); + } catch (LibvirtException e) { + s_logger.error("Failed to get libvirt connection for domain event lifecycle", e); + } + } + + private int onDomainLifecycleChange(Domain domain, DomainEvent domainEvent) { + try { + s_logger.debug(String.format("Got event lifecycle change on Domain %s, event %s", domain.getName(), domainEvent)); + if (domainEvent != null) { + switch (domainEvent.getType()) { + case STOPPED: + /* libvirt-destroyed VMs have detail StoppedDetail.DESTROYED, self shutdown guests are StoppedDetail.SHUTDOWN + * Checking for this helps us differentiate between events where cloudstack or admin stopped the VM vs guest + * initiated, and avoid pushing extra updates for actions we are initiating without a need for extra tracking */ + DomainEventDetail detail = domainEvent.getDetail(); + if (StoppedDetail.SHUTDOWN.equals(detail) || StoppedDetail.CRASHED.equals(detail)) { + s_logger.info("Triggering out of band status update due to completed self-shutdown or crash of VM"); + _agentStatusUpdater.triggerUpdate(); + } else { + s_logger.debug("Event detail: " + detail); + } + break; + default: + s_logger.debug(String.format("No handling for event %s", domainEvent)); + } + } + } catch (LibvirtException e) { + s_logger.error("Libvirt exception while processing lifecycle event", e); + } catch (Throwable e) { + s_logger.error("Error during lifecycle", e); + } + return 0; + } + public String diskUuidToSerial(String uuid) { String uuidWithoutHyphen = uuid.replace("-",""); return uuidWithoutHyphen.substring(0, Math.min(uuidWithoutHyphen.length(), 20)); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckUrlCommand.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckUrlCommand.java index 935bc8e113b..5faad5633f3 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckUrlCommand.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckUrlCommand.java @@ -35,11 +35,16 @@ public class LibvirtCheckUrlCommand extends CommandWrapper 0) { + resizeResource(linstorApi, rscName, volumeInfo.getSize()); + } + applyAuxProps(linstorApi, rscName, volumeInfo.getName(), volumeInfo.getAttachedVmName()); applyQoSSettings(storagePoolVO, linstorApi, rscName, volumeInfo.getMaxIops()); @@ -738,26 +756,16 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver dfm.setSizeKib(resizeParameter.newSize 
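To make the new libvirt event plumbing easier to follow, here is a minimal standalone sketch built only from the libvirt-java calls the patch itself relies on (Connect.addLifecycleListener, Library.runEventLoop, StoppedDetail). The qemu:///system URI, the Runnable standing in for the agent's triggerUpdate() callback, and blocking on the event loop in main are illustrative assumptions rather than part of the change:

    import org.libvirt.Connect;
    import org.libvirt.Domain;
    import org.libvirt.Library;
    import org.libvirt.LibvirtException;
    import org.libvirt.event.DomainEvent;
    import org.libvirt.event.DomainEventDetail;
    import org.libvirt.event.StoppedDetail;

    public class LifecycleListenerSketch {

        // Stand-in for AgentStatusUpdater.triggerUpdate(); hypothetical, for illustration only.
        private static final Runnable OUT_OF_BAND_PING = () -> System.out.println("would send out-of-band ping");

        public static void main(String[] args) throws LibvirtException, InterruptedException {
            Connect conn = new Connect("qemu:///system"); // assumed local hypervisor URI
            conn.addLifecycleListener(LifecycleListenerSketch::onLifecycleChange);
            Library.runEventLoop(); // the patch runs this loop on a daemon thread instead of blocking
        }

        private static int onLifecycleChange(Domain domain, DomainEvent event) {
            try {
                System.out.println(String.format("Got event lifecycle change on Domain %s, event %s", domain.getName(), event));
                if (event != null) {
                    switch (event.getType()) {
                        case STOPPED:
                            // Only guest-initiated shutdowns and crashes matter; CloudStack-initiated
                            // stops arrive as StoppedDetail.DESTROYED and are deliberately ignored.
                            DomainEventDetail detail = event.getDetail();
                            if (StoppedDetail.SHUTDOWN.equals(detail) || StoppedDetail.CRASHED.equals(detail)) {
                                OUT_OF_BAND_PING.run();
                            }
                            break;
                        default:
                            break;
                    }
                }
            } catch (LibvirtException e) {
                e.printStackTrace();
            }
            return 0;
        }
    }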
/ 1024); try { + resizeResource(api, rscName, resizeParameter.newSize); + applyQoSSettings(pool, api, rscName, resizeParameter.newMaxIops); { final VolumeVO volume = _volumeDao.findById(vol.getId()); volume.setMinIops(resizeParameter.newMinIops); volume.setMaxIops(resizeParameter.newMaxIops); + volume.setSize(resizeParameter.newSize); _volumeDao.update(volume.getId(), volume); } - - ApiCallRcList answers = api.volumeDefinitionModify(rscName, 0, dfm); - if (answers.hasError()) - { - s_logger.error("Resize error: " + answers.get(0).getMessage()); - errMsg = answers.get(0).getMessage(); - } else - { - s_logger.info(String.format("Successfully resized %s to %d kib", rscName, dfm.getSizeKib())); - vol.setSize(resizeParameter.newSize); - vol.update(); - } - } catch (ApiException apiExc) { s_logger.error(apiExc); @@ -765,12 +773,10 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver } CreateCmdResult result; - if (errMsg != null) - { + if (errMsg != null) { result = new CreateCmdResult(null, new Answer(null, false, errMsg)); result.setResult(errMsg); - } else - { + } else { // notify guests result = notifyResize(vol, oldSize, resizeParameter); } diff --git a/server/src/main/java/com/cloud/api/ApiResponseHelper.java b/server/src/main/java/com/cloud/api/ApiResponseHelper.java index 4ba6375bc50..6d4972573ae 100644 --- a/server/src/main/java/com/cloud/api/ApiResponseHelper.java +++ b/server/src/main/java/com/cloud/api/ApiResponseHelper.java @@ -1639,6 +1639,12 @@ public class ApiResponseHelper implements ResponseGenerator { vmResponse.setCreated(vm.getCreated()); vmResponse.setHypervisor(vm.getHypervisorType().getHypervisorDisplayName()); + ServiceOffering serviceOffering = ApiDBUtils.findServiceOfferingById(vm.getServiceOfferingId()); + if (serviceOffering != null) { + vmResponse.setServiceOfferingId(serviceOffering.getUuid()); + vmResponse.setServiceOfferingName(serviceOffering.getName()); + } + if (vm.getHostId() != null) { Host host = ApiDBUtils.findHostById(vm.getHostId()); if (host != null) { diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index 50c8f0b435f..1808230d885 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -3786,6 +3786,9 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q } } else if (templateFilter == TemplateFilter.sharedexecutable || templateFilter == TemplateFilter.shared) { // only show templates shared by others + if (permittedAccounts.isEmpty()) { + return new Pair<>(new ArrayList<>(), 0); + } sc.addAnd("sharedAccountId", SearchCriteria.Op.IN, permittedAccountIds.toArray()); } else if (templateFilter == TemplateFilter.executable) { SearchCriteria scc = _templateJoinDao.createSearchCriteria(); diff --git a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java index 5e0fd8d4196..b776d34fae9 100644 --- a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java @@ -49,6 +49,7 @@ import javax.naming.ConfigurationException; import com.cloud.hypervisor.HypervisorGuru; import com.cloud.network.dao.NsxProviderDao; import com.cloud.network.element.NsxProviderVO; +import com.cloud.utils.crypt.DBEncryptionUtil; import 
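One side note on the listTemplates change above: when the permitted-account list is empty, the search would otherwise be handed an IN condition with no values, so the patch returns an empty page up front. A rough plain-JDBC sketch of the same guard; the template_view table and shared_account_id column are placeholder names, not the real schema:

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;
    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.stream.Collectors;

    public class SharedTemplateQuerySketch {

        /** Returns an empty result instead of issuing an IN clause with no values. */
        public static List<Long> listSharedTemplateIds(Connection conn, List<Long> permittedAccountIds) throws Exception {
            if (permittedAccountIds.isEmpty()) {
                return Collections.emptyList(); // mirrors the early "return new Pair<>(new ArrayList<>(), 0)"
            }
            String placeholders = permittedAccountIds.stream().map(id -> "?").collect(Collectors.joining(", "));
            String sql = "SELECT id FROM template_view WHERE shared_account_id IN (" + placeholders + ")"; // hypothetical table/column
            try (PreparedStatement ps = conn.prepareStatement(sql)) {
                for (int i = 0; i < permittedAccountIds.size(); i++) {
                    ps.setLong(i + 1, permittedAccountIds.get(i));
                }
                try (ResultSet rs = ps.executeQuery()) {
                    List<Long> ids = new ArrayList<>();
                    while (rs.next()) {
                        ids.add(rs.getLong(1));
                    }
                    return ids;
                }
            }
        }
    }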
org.apache.cloudstack.acl.SecurityChecker; import org.apache.cloudstack.affinity.AffinityGroup; import org.apache.cloudstack.affinity.AffinityGroupService; @@ -668,7 +669,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @Override @DB - public String updateConfiguration(final long userId, final String name, final String category, final String value, final String scope, final Long resourceId) { + public String updateConfiguration(final long userId, final String name, final String category, String value, final String scope, final Long resourceId) { final String validationMsg = validateConfigurationValue(name, value, scope); if (validationMsg != null) { @@ -681,6 +682,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati // if scope is mentioned as global or not mentioned then it is normal // global parameter updation if (scope != null && !scope.isEmpty() && !ConfigKey.Scope.Global.toString().equalsIgnoreCase(scope)) { + boolean valueEncrypted = shouldEncryptValue(category); + if (valueEncrypted) { + value = DBEncryptionUtil.encrypt(value); + } + switch (ConfigKey.Scope.valueOf(scope)) { case Zone: final DataCenterVO zone = _zoneDao.findById(resourceId); @@ -771,7 +777,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati default: throw new InvalidParameterValueException("Scope provided is invalid"); } - return value; + + return valueEncrypted ? DBEncryptionUtil.decrypt(value) : value; } // Execute all updates in a single transaction @@ -868,6 +875,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati return _configDao.getValue(name); } + private boolean shouldEncryptValue(String category) { + return StringUtils.equalsAny(category, "Hidden", "Secure"); + } + /** * Updates the 'hypervisor.list' value to match the new custom hypervisor name set as newValue if the previous value was set */ @@ -894,10 +905,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati final Long imageStoreId = cmd.getImageStoreId(); Long accountId = cmd.getAccountId(); Long domainId = cmd.getDomainId(); - CallContext.current().setEventDetails(" Name: " + name + " New Value: " + (name.toLowerCase().contains("password") ? "*****" : value == null ? "" : value)); // check if config value exists final ConfigurationVO config = _configDao.findByName(name); - String catergory = null; + String category = null; + String eventValue = encryptEventValueIfConfigIsEncrypted(config, value); + CallContext.current().setEventDetails(String.format(" Name: %s New Value: %s", name, eventValue)); final Account caller = CallContext.current().getCallingAccount(); if (_accountMgr.isDomainAdmin(caller.getId())) { @@ -916,9 +928,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati s_logger.warn("Probably the component manager where configuration variable " + name + " is defined needs to implement Configurable interface"); throw new InvalidParameterValueException("Config parameter with name " + name + " doesn't exist"); } - catergory = _configDepot.get(name).category(); + category = _configDepot.get(name).category(); } else { - catergory = config.getCategory(); + category = config.getCategory(); } validateIpAddressRelatedConfigValues(name, value); @@ -975,7 +987,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati value = (id == null) ? 
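The scoped-setting path above boils down to encrypt before persisting and decrypt before returning. A compact sketch using the same DBEncryptionUtil helper and category rule as the patch; the ConfigRepository interface is an illustrative stand-in for the scoped detail DAOs:

    import org.apache.commons.lang3.StringUtils;

    import com.cloud.utils.crypt.DBEncryptionUtil;

    public class ScopedConfigUpdateSketch {

        /** Hypothetical persistence hook, standing in for the zone/cluster/account/domain detail DAOs. */
        public interface ConfigRepository {
            void persist(String name, String storedValue);
        }

        static boolean shouldEncryptValue(String category) {
            // Same rule as the patch: only "Hidden" and "Secure" settings are encrypted at rest
            return StringUtils.equalsAny(category, "Hidden", "Secure");
        }

        /** Stores the (possibly encrypted) value and returns plain text to the caller. */
        static String updateScopedValue(ConfigRepository repo, String name, String category, String value) {
            boolean valueEncrypted = shouldEncryptValue(category);
            String stored = valueEncrypted ? DBEncryptionUtil.encrypt(value) : value;
            repo.persist(name, stored);
            // The API response must expose the plain value, hence the decrypt on the way out
            return valueEncrypted ? DBEncryptionUtil.decrypt(stored) : stored;
        }
    }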
null : ""; } - final String updatedValue = updateConfiguration(userId, name, catergory, value, scope, id); + final String updatedValue = updateConfiguration(userId, name, category, value, scope, id); if (value == null && updatedValue == null || updatedValue.equalsIgnoreCase(value)) { return _configDao.findByName(name); } else { @@ -983,6 +995,13 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } } + private String encryptEventValueIfConfigIsEncrypted(ConfigurationVO config, String value) { + if (config != null && config.isEncrypted()) { + return "*****"; + } + return Objects.requireNonNullElse(value, ""); + } + private ParamCountPair getParamCount(Map scopeMap) { Long id = null; int paramCount = 0; diff --git a/server/src/main/java/com/cloud/hypervisor/discoverer/CustomServerDiscoverer.java b/server/src/main/java/com/cloud/hypervisor/discoverer/CustomServerDiscoverer.java index 534947f092e..e5ef78305ea 100644 --- a/server/src/main/java/com/cloud/hypervisor/discoverer/CustomServerDiscoverer.java +++ b/server/src/main/java/com/cloud/hypervisor/discoverer/CustomServerDiscoverer.java @@ -29,4 +29,9 @@ public class CustomServerDiscoverer extends LibvirtServerDiscoverer { protected String getPatchPath() { return "scripts/vm/hypervisor/kvm/"; } + + @Override + public void processHostAdded(long hostId) { + // Not using super class implementation here. + } } diff --git a/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java b/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java index 440961dd7b0..e9f0d5f58e4 100644 --- a/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java +++ b/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java @@ -110,7 +110,7 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements @Override public void processHostAdded(long hostId) { HostVO host = hostDao.findById(hostId); - if (host != null) { + if (host != null && getHypervisorType().equals(host.getHypervisorType())) { directDownloadManager.syncCertificatesToHost(hostId, host.getDataCenterId()); } } diff --git a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java index afa3a0c7e41..75298b73e64 100644 --- a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java +++ b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java @@ -1001,7 +1001,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio final Map defaultNSXNetworkOfferingProviders = new HashMap<>(); defaultNSXNetworkOfferingProviders.put(Service.Dhcp, Provider.VPCVirtualRouter); - defaultNSXNetworkOfferingProviders.put(Service.Dns, Provider.VPCVirtualRouter); + defaultNSXNetworkOfferingProviders.put(Service.Dns, Provider.VPCVirtualRouter ); defaultNSXNetworkOfferingProviders.put(Service.SourceNat, Provider.Nsx); defaultNSXNetworkOfferingProviders.put(Service.UserData, Provider.VPCVirtualRouter); diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index f4331aa5041..4bdb8f2861e 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -2388,6 +2388,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Override public boolean 
storagePoolHasEnoughIops(List> requestedVolumes, StoragePool pool) { if (requestedVolumes == null || requestedVolumes.isEmpty() || pool == null) { + s_logger.debug(String.format("Cannot check if storage [%s] has enough IOPS to allocate volumes [%s].", pool, requestedVolumes)); return false; } @@ -2418,8 +2419,10 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } long futureIops = currentIops + requestedIops; - - return futureIops <= pool.getCapacityIops(); + boolean hasEnoughIops = futureIops <= pool.getCapacityIops(); + String hasCapacity = hasEnoughIops ? "has" : "does not have"; + s_logger.debug(String.format("Pool [%s] %s enough IOPS to allocate volumes [%s].", pool, hasCapacity, requestedVolumes)); + return hasEnoughIops; } @Override @@ -2430,10 +2433,12 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Override public boolean storagePoolHasEnoughSpace(List> volumeDiskProfilesList, StoragePool pool, Long clusterId) { if (CollectionUtils.isEmpty(volumeDiskProfilesList)) { + s_logger.debug(String.format("Cannot check if pool [%s] has enough space to allocate volumes because the volumes list is empty.", pool)); return false; } if (!checkUsagedSpace(pool)) { + s_logger.debug(String.format("Cannot allocate pool [%s] because there is not enough space in this pool.", pool)); return false; } @@ -2696,30 +2701,34 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Override public boolean storagePoolCompatibleWithVolumePool(StoragePool pool, Volume volume) { if (pool == null || volume == null) { + s_logger.debug(String.format("Cannot check if storage pool [%s] is compatible with volume [%s].", pool, volume)); return false; } if (volume.getPoolId() == null) { - // Volume is not allocated to any pool. Not possible to check compatibility with other pool, let it try + s_logger.debug(String.format("Volume [%s] is not allocated to any pool. Cannot check compatibility with pool [%s].", volume, pool)); return true; } StoragePool volumePool = _storagePoolDao.findById(volume.getPoolId()); if (volumePool == null) { - // Volume pool doesn't exist. Not possible to check compatibility with other pool, let it try + s_logger.debug(String.format("Pool [%s] used by volume [%s] does not exist. 
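The new debug messages above describe a simple capacity comparison; the arithmetic, sketched on its own:

    public class IopsCapacityCheckSketch {

        /** Returns true when current plus requested IOPS still fits the pool's IOPS capacity. */
        static boolean hasEnoughIops(long currentIops, long requestedIops, long capacityIops) {
            long futureIops = currentIops + requestedIops;
            boolean hasEnough = futureIops <= capacityIops;
            System.out.printf("Pool %s enough IOPS (%d + %d vs capacity %d)%n",
                    hasEnough ? "has" : "does not have", currentIops, requestedIops, capacityIops);
            return hasEnough;
        }

        public static void main(String[] args) {
            hasEnoughIops(40_000, 10_000, 60_000); // has
            hasEnoughIops(55_000, 10_000, 60_000); // does not have
        }
    }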
Cannot check compatibility.", pool, volume)); return true; } if (volume.getState() == Volume.State.Ready) { if (volumePool.getPoolType() == Storage.StoragePoolType.PowerFlex && pool.getPoolType() != Storage.StoragePoolType.PowerFlex) { + s_logger.debug(String.format("Pool [%s] with type [%s] does not match volume [%s] pool type [%s].", pool, pool.getPoolType(), volume, volumePool.getPoolType())); return false; } else if (volumePool.getPoolType() != Storage.StoragePoolType.PowerFlex && pool.getPoolType() == Storage.StoragePoolType.PowerFlex) { + s_logger.debug(String.format("Pool [%s] with type [%s] does not match volume [%s] pool type [%s].", pool, pool.getPoolType(), volume, volumePool.getPoolType())); return false; } } else { + s_logger.debug(String.format("Cannot check compatibility of pool [%s] because volume [%s] is not in [%s] state.", pool, volume, Volume.State.Ready)); return false; } - + s_logger.debug(String.format("Pool [%s] is compatible with volume [%s].", pool, volume)); return true; } diff --git a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java index 3f45505bcc5..aed6a83f0a3 100644 --- a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java +++ b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java @@ -39,6 +39,7 @@ import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd; import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd; import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd; import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; +import org.apache.cloudstack.direct.download.DirectDownloadManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ -168,7 +169,10 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { if (host == null) { throw new CloudRuntimeException("Couldn't find a host to validate URL " + url); } - CheckUrlCommand cmd = new CheckUrlCommand(format, url); + Integer socketTimeout = DirectDownloadManager.DirectDownloadSocketTimeout.value(); + Integer connectRequestTimeout = DirectDownloadManager.DirectDownloadConnectionRequestTimeout.value(); + Integer connectTimeout = DirectDownloadManager.DirectDownloadConnectTimeout.value(); + CheckUrlCommand cmd = new CheckUrlCommand(format, url, connectTimeout, connectRequestTimeout, socketTimeout); s_logger.debug("Performing URL " + url + " validation on host " + host.getId()); Answer answer = _agentMgr.easySend(host.getId(), cmd); if (answer == null || !answer.getResult()) { diff --git a/server/src/main/java/com/cloud/user/AccountManagerImpl.java b/server/src/main/java/com/cloud/user/AccountManagerImpl.java index 3684657faec..04785077936 100644 --- a/server/src/main/java/com/cloud/user/AccountManagerImpl.java +++ b/server/src/main/java/com/cloud/user/AccountManagerImpl.java @@ -814,6 +814,9 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M return false; } + account.setState(State.REMOVED); + _accountDao.update(accountId, account); + if (s_logger.isDebugEnabled()) { s_logger.debug("Removed account " + accountId); } diff --git a/server/src/main/java/com/cloud/user/PasswordPolicy.java b/server/src/main/java/com/cloud/user/PasswordPolicy.java index 42ef7b475ef..c5278baa737 100644 --- 
a/server/src/main/java/com/cloud/user/PasswordPolicy.java +++ b/server/src/main/java/com/cloud/user/PasswordPolicy.java @@ -25,7 +25,8 @@ public interface PasswordPolicy { Integer.class, "password.policy.minimum.special.characters", "0", - "Minimum number of special characters that the user's password must have. The value 0 means the user's password does not require any special characters.", + "Minimum number of special characters that the user's password must have. Any character that is neither a letter nor numeric is considered special. " + + "The value 0 means the user's password does not require any special characters.", true, ConfigKey.Scope.Domain); @@ -43,7 +44,7 @@ public interface PasswordPolicy { Integer.class, "password.policy.minimum.uppercase.letters", "0", - "Minimum number of uppercase letters that the user's password must have. The value 0 means the user's password does not require any uppercase letters.", + "Minimum number of uppercase letters [A-Z] that the user's password must have. The value 0 means the user's password does not require any uppercase letters.", true, ConfigKey.Scope.Domain); @@ -52,7 +53,7 @@ public interface PasswordPolicy { Integer.class, "password.policy.minimum.lowercase.letters", "0", - "Minimum number of lowercase letters that the user's password must have. The value 0 means the user's password does not require any lowercase letters.", + "Minimum number of lowercase letters [a-z] that the user's password must have. The value 0 means the user's password does not require any lowercase letters.", true, ConfigKey.Scope.Domain); @@ -61,7 +62,7 @@ public interface PasswordPolicy { Integer.class, "password.policy.minimum.digits", "0", - "Minimum number of digits that the user's password must have. The value 0 means the user's password does not require any digits.", + "Minimum number of numeric characters [0-9] that the user's password must have. 
The value 0 means the user's password does not require any numeric characters.", true, ConfigKey.Scope.Domain); diff --git a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java index eab1e98c800..12665a7db7b 100644 --- a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java @@ -58,7 +58,6 @@ import com.cloud.agent.api.GetUnmanagedInstancesAnswer; import com.cloud.agent.api.GetUnmanagedInstancesCommand; import com.cloud.agent.api.PrepareUnmanageVMInstanceAnswer; import com.cloud.agent.api.PrepareUnmanageVMInstanceCommand; -import com.cloud.capacity.CapacityManager; import com.cloud.configuration.Config; import com.cloud.configuration.Resource; import com.cloud.dc.DataCenter; @@ -121,6 +120,7 @@ import com.cloud.user.ResourceLimitService; import com.cloud.user.UserVO; import com.cloud.user.dao.UserDao; import com.cloud.uservm.UserVm; +import com.cloud.utils.LogUtils; import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; @@ -138,7 +138,6 @@ import com.cloud.vm.VmDetailConstants; import com.cloud.vm.dao.NicDao; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; -import com.cloud.vm.snapshot.dao.VMSnapshotDao; import com.google.gson.Gson; public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { @@ -186,8 +185,6 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { @Inject private VMInstanceDao vmDao; @Inject - private CapacityManager capacityManager; - @Inject private VolumeApiService volumeApiService; @Inject private DeploymentPlanningManager deploymentPlanningManager; @@ -206,8 +203,6 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { @Inject private GuestOSHypervisorDao guestOSHypervisorDao; @Inject - private VMSnapshotDao vmSnapshotDao; - @Inject private SnapshotDao snapshotDao; @Inject private UserVmDao userVmDao; @@ -335,7 +330,6 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { String[] split = path.split(" "); path = split[split.length - 1]; split = path.split("/"); - ; path = split[split.length - 1]; split = path.split("\\."); path = split[0]; @@ -387,26 +381,29 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { private ServiceOfferingVO getUnmanagedInstanceServiceOffering(final UnmanagedInstanceTO instance, ServiceOfferingVO serviceOffering, final Account owner, final DataCenter zone, final Map details) throws ServerApiException, PermissionDeniedException, ResourceAllocationException { if (instance == null) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM is not valid")); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Cannot find VM to import."); } if (serviceOffering == null) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Service offering is not valid")); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Cannot find service offering used to import VM [%s].", instance.getName())); } accountService.checkAccess(owner, serviceOffering, zone); final Integer cpu = instance.getCpuCores(); final Integer memory = instance.getMemory(); Integer cpuSpeed = instance.getCpuSpeed() == null ? 
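The reworded descriptions above pin the character classes down ([A-Z], [a-z], [0-9], and anything that is neither a letter nor numeric counting as special). A small sketch of counting rules that match those descriptions; it is not the code CloudStack uses to enforce the policy:

    public class PasswordPolicySketch {

        static long countUppercase(String password) {
            return password.chars().filter(c -> c >= 'A' && c <= 'Z').count();
        }

        static long countLowercase(String password) {
            return password.chars().filter(c -> c >= 'a' && c <= 'z').count();
        }

        static long countDigits(String password) {
            return password.chars().filter(c -> c >= '0' && c <= '9').count();
        }

        /** Anything that is neither a letter nor a digit counts as a special character. */
        static long countSpecial(String password) {
            return password.chars().filter(c -> !Character.isLetterOrDigit(c)).count();
        }

        public static void main(String[] args) {
            String password = "Adm1n-pass!";
            // upper=1 lower=7 digits=1 special=2
            System.out.printf("upper=%d lower=%d digits=%d special=%d%n",
                    countUppercase(password), countLowercase(password), countDigits(password), countSpecial(password));
        }
    }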
0 : instance.getCpuSpeed(); + if (cpu == null || cpu == 0) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("CPU cores for VM (%s) not valid", instance.getName())); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("CPU cores [%s] is not valid for importing VM [%s].", cpu, instance.getName())); } if (memory == null || memory == 0) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Memory for VM (%s) not valid", instance.getName())); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Memory [%s] is not valid for importing VM [%s].", memory, instance.getName())); } + if (serviceOffering.isDynamic()) { if (details.containsKey(VmDetailConstants.CPU_SPEED)) { try { cpuSpeed = Integer.parseInt(details.get(VmDetailConstants.CPU_SPEED)); } catch (Exception e) { + LOGGER.error(String.format("Failed to get CPU speed for importing VM [%s] due to [%s].", instance.getName(), e.getMessage()), e); } } Map parameters = new HashMap<>(); @@ -429,8 +426,8 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Service offering (%s) %dMHz CPU speed does not match VM CPU speed %dMHz and VM is not in powered off state (Power state: %s)", serviceOffering.getUuid(), serviceOffering.getSpeed(), cpuSpeed, instance.getPowerState())); } } - resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.cpu, new Long(serviceOffering.getCpu())); - resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.memory, new Long(serviceOffering.getRamSize())); + resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.cpu, Long.valueOf(serviceOffering.getCpu())); + resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.memory, Long.valueOf(serviceOffering.getRamSize())); return serviceOffering; } @@ -520,10 +517,10 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { return new Pair<>(rootDisk, dataDisks); } - private void checkUnmanagedDiskAndOfferingForImport(UnmanagedInstanceTO.Disk disk, DiskOffering diskOffering, ServiceOffering serviceOffering, final Account owner, final DataCenter zone, final Cluster cluster, final boolean migrateAllowed) + private void checkUnmanagedDiskAndOfferingForImport(String instanceName, UnmanagedInstanceTO.Disk disk, DiskOffering diskOffering, ServiceOffering serviceOffering, final Account owner, final DataCenter zone, final Cluster cluster, final boolean migrateAllowed) throws ServerApiException, PermissionDeniedException, ResourceAllocationException { if (serviceOffering == null && diskOffering == null) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Disk offering for disk ID: %s not found during VM import", disk.getDiskId())); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Disk offering for disk ID [%s] not found during VM [%s] import.", disk.getDiskId(), instanceName)); } if (diskOffering != null) { accountService.checkAccess(owner, diskOffering, zone); @@ -544,15 +541,15 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { } } - private void checkUnmanagedDiskAndOfferingForImport(List disks, final Map diskOfferingMap, final Account owner, final DataCenter zone, final Cluster cluster, final boolean migrateAllowed) + private void checkUnmanagedDiskAndOfferingForImport(String intanceName, List disks, final Map diskOfferingMap, final Account owner, final DataCenter 
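The CPU-speed handling above is a parse-or-fall-back pattern applied when the service offering is dynamic. A minimal sketch with a hypothetical details map and key name:

    import java.util.Map;

    public class CpuSpeedDetailSketch {

        /** Hypothetical stand-in for VmDetailConstants.CPU_SPEED. */
        static final String CPU_SPEED = "cpuSpeed";

        /** Returns the parsed CPU speed, or the fallback when the detail is absent or malformed. */
        static int resolveCpuSpeed(Map<String, String> details, int fallbackMhz) {
            if (!details.containsKey(CPU_SPEED)) {
                return fallbackMhz;
            }
            try {
                return Integer.parseInt(details.get(CPU_SPEED));
            } catch (NumberFormatException e) {
                System.err.printf("Failed to parse CPU speed [%s], keeping %d MHz: %s%n",
                        details.get(CPU_SPEED), fallbackMhz, e.getMessage());
                return fallbackMhz;
            }
        }

        public static void main(String[] args) {
            System.out.println(resolveCpuSpeed(Map.of(CPU_SPEED, "2000"), 1000)); // 2000
            System.out.println(resolveCpuSpeed(Map.of(CPU_SPEED, "fast"), 1000)); // 1000
            System.out.println(resolveCpuSpeed(Map.of(), 1000));                  // 1000
        }
    }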
zone, final Cluster cluster, final boolean migrateAllowed) throws ServerApiException, PermissionDeniedException, ResourceAllocationException { String diskController = null; for (UnmanagedInstanceTO.Disk disk : disks) { if (disk == null) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Unable to retrieve disk details for VM")); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Unable to retrieve disk details for VM [%s].", intanceName)); } if (!diskOfferingMap.containsKey(disk.getDiskId())) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Disk offering for disk ID: %s not found during VM import", disk.getDiskId())); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Disk offering for disk ID [%s] not found during VM import.", disk.getDiskId())); } if (StringUtils.isEmpty(diskController)) { diskController = disk.getController(); @@ -561,17 +558,12 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Multiple data disk controllers of different type (%s, %s) are not supported for import. Please make sure that all data disk controllers are of the same type", diskController, disk.getController())); } } - checkUnmanagedDiskAndOfferingForImport(disk, diskOfferingDao.findById(diskOfferingMap.get(disk.getDiskId())), null, owner, zone, cluster, migrateAllowed); + checkUnmanagedDiskAndOfferingForImport(intanceName, disk, diskOfferingDao.findById(diskOfferingMap.get(disk.getDiskId())), null, owner, zone, cluster, migrateAllowed); } } - private void checkUnmanagedNicAndNetworkForImport(UnmanagedInstanceTO.Nic nic, Network network, final DataCenter zone, final Account owner, final boolean autoAssign) throws ServerApiException { - if (nic == null) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Unable to retrieve NIC details during VM import")); - } - if (network == null) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Network for nic ID: %s not found during VM import", nic.getNicId())); - } + private void checkUnmanagedNicAndNetworkForImport(String instanceName, UnmanagedInstanceTO.Nic nic, Network network, final DataCenter zone, final Account owner, final boolean autoAssign) throws ServerApiException { + basicNetworkChecks(instanceName, nic, network); if (network.getDataCenterId() != zone.getId()) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Network(ID: %s) for nic(ID: %s) belongs to a different zone than VM to be imported", network.getUuid(), nic.getNicId())); } @@ -588,34 +580,31 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { } String pvLanType = nic.getPvlanType() == null ? 
"" : nic.getPvlanType().toLowerCase().substring(0, 1); if (nic.getVlan() != null && nic.getVlan() != 0 && nic.getPvlan() != null && nic.getPvlan() != 0 && - (StringUtils.isEmpty(network.getBroadcastUri().toString()) || - !networkBroadcastUri.equals(String.format("pvlan://%d-%s%d", nic.getVlan(), pvLanType, nic.getPvlan())))) { + (StringUtils.isEmpty(networkBroadcastUri) || !String.format("pvlan://%d-%s%d", nic.getVlan(), pvLanType, nic.getPvlan()).equals(networkBroadcastUri))) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("PVLAN of network(ID: %s) %s is found different from the VLAN of nic(ID: %s) pvlan://%d-%s%d during VM import", network.getUuid(), networkBroadcastUri, nic.getNicId(), nic.getVlan(), pvLanType, nic.getPvlan())); } } - private void checkUnmanagedNicAndNetworkHostnameForImport(UnmanagedInstanceTO.Nic nic, Network network, final String hostName) throws ServerApiException { + private void basicNetworkChecks(String instanceName, UnmanagedInstanceTO.Nic nic, Network network) { if (nic == null) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Unable to retrieve NIC details during VM import")); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Unable to retrieve the NIC details used by VM [%s] from VMware. Please check if this VM have NICs in VMWare.", instanceName)); } if (network == null) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Network for nic ID: %s not found during VM import", nic.getNicId())); - } - // Check for duplicate hostname in network, get all vms hostNames in the network - List hostNames = vmDao.listDistinctHostNames(network.getId()); - if (CollectionUtils.isNotEmpty(hostNames) && hostNames.contains(hostName)) { - throw new InvalidParameterValueException("The vm with hostName " + hostName + " already exists in the network domain: " + network.getNetworkDomain() + "; network=" - + network); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Network for nic ID: %s not found during VM import.", nic.getNicId())); } } - private void checkUnmanagedNicIpAndNetworkForImport(UnmanagedInstanceTO.Nic nic, Network network, final Network.IpAddresses ipAddresses) throws ServerApiException { - if (nic == null) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Unable to retrieve NIC details during VM import")); - } - if (network == null) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Network for nic ID: %s not found during VM import", nic.getNicId())); + private void checkUnmanagedNicAndNetworkHostnameForImport(String instanceName, UnmanagedInstanceTO.Nic nic, Network network, final String hostName) throws ServerApiException { + basicNetworkChecks(instanceName, nic, network); + // Check for duplicate hostname in network, get all vms hostNames in the network + List hostNames = vmDao.listDistinctHostNames(network.getId()); + if (CollectionUtils.isNotEmpty(hostNames) && hostNames.contains(hostName)) { + throw new InvalidParameterValueException(String.format("VM with Name [%s] already exists in the network [%s] domain [%s]. Cannot import another VM with the same name. 
Please try again with a different name.", hostName, network, network.getNetworkDomain())); } + } + + private void checkUnmanagedNicIpAndNetworkForImport(String instanceName, UnmanagedInstanceTO.Nic nic, Network network, final Network.IpAddresses ipAddresses) throws ServerApiException { + basicNetworkChecks(instanceName, nic, network); // Check IP is assigned for non L2 networks if (!network.getGuestType().equals(Network.GuestType.L2) && (ipAddresses == null || StringUtils.isEmpty(ipAddresses.getIp4Address()))) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("NIC(ID: %s) needs a valid IP address for it to be associated with network(ID: %s). %s parameter of API can be used for this", nic.getNicId(), network.getUuid(), ApiConstants.NIC_IP_ADDRESS_LIST)); @@ -629,7 +618,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { } } - private Map getUnmanagedNicNetworkMap(List nics, final Map callerNicNetworkMap, final Map callerNicIpAddressMap, final DataCenter zone, final String hostName, final Account owner) throws ServerApiException { + private Map getUnmanagedNicNetworkMap(String instanceName, List nics, final Map callerNicNetworkMap, final Map callerNicIpAddressMap, final DataCenter zone, final String hostName, final Account owner) throws ServerApiException { Map nicNetworkMap = new HashMap<>(); String nicAdapter = null; for (UnmanagedInstanceTO.Nic nic : nics) { @@ -654,22 +643,23 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { continue; } try { - checkUnmanagedNicAndNetworkForImport(nic, networkVO, zone, owner, true); + checkUnmanagedNicAndNetworkForImport(instanceName, nic, networkVO, zone, owner, true); network = networkVO; } catch (Exception e) { + LOGGER.error(String.format("Error when checking NIC [%s] of unmanaged instance to import due to [%s]."
, nic.getNicId(), e.getMessage()), e); } if (network != null) { - checkUnmanagedNicAndNetworkHostnameForImport(nic, network, hostName); - checkUnmanagedNicIpAndNetworkForImport(nic, network, ipAddresses); + checkUnmanagedNicAndNetworkHostnameForImport(instanceName, nic, network, hostName); + checkUnmanagedNicIpAndNetworkForImport(instanceName, nic, network, ipAddresses); break; } } } } else { network = networkDao.findById(callerNicNetworkMap.get(nic.getNicId())); - checkUnmanagedNicAndNetworkForImport(nic, network, zone, owner, false); - checkUnmanagedNicAndNetworkHostnameForImport(nic, network, hostName); - checkUnmanagedNicIpAndNetworkForImport(nic, network, ipAddresses); + checkUnmanagedNicAndNetworkForImport(instanceName, nic, network, zone, owner, false); + checkUnmanagedNicAndNetworkHostnameForImport(instanceName, nic, network, hostName); + checkUnmanagedNicIpAndNetworkForImport(instanceName, nic, network, ipAddresses); } if (network == null) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Suitable network for nic(ID: %s) not found during VM import", nic.getNicId())); @@ -745,14 +735,10 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { try { dest = deploymentPlanningManager.planDeployment(profile, plan, excludeList, null); } catch (Exception e) { - LOGGER.warn(String.format("VM import failed for unmanaged vm: %s during vm migration, finding deployment destination", vm.getInstanceName()), e); + String errorMsg = String.format("VM import failed for Unmanaged VM [%s] during VM migration, cannot find deployment destination due to [%s].", vm.getInstanceName(), e.getMessage()); + LOGGER.warn(errorMsg, e); cleanupFailedImportVM(vm); - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm: %s during vm migration, finding deployment destination", vm.getInstanceName())); - } - if (dest != null) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(" Found " + dest + " for migrating the vm to"); - } + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMsg); } if (dest == null) { cleanupFailedImportVM(vm); @@ -769,9 +755,10 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { } vm = userVmManager.getUserVm(vm.getId()); } catch (Exception e) { - LOGGER.error(String.format("VM import failed for unmanaged vm: %s during vm migration", vm.getInstanceName()), e); + String errorMsg = String.format("VM import failed for Unmanaged VM [%s] during VM migration due to [%s].", vm.getInstanceName(), e.getMessage()); + LOGGER.error(errorMsg, e); cleanupFailedImportVM(vm); - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm: %s during vm migration. 
%s", userVm.getInstanceName(), e.getMessage())); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMsg); } } for (Pair diskProfileStoragePool : diskProfileStoragePoolList) { @@ -857,9 +844,9 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { private void publishVMUsageUpdateResourceCount(final UserVm userVm, ServiceOfferingVO serviceOfferingVO) { if (userVm == null || serviceOfferingVO == null) { - LOGGER.error("Failed to publish usage records during VM import"); + LOGGER.error(String.format("Failed to publish usage records during VM import because VM [%s] or ServiceOffering [%s] is null.", userVm, serviceOfferingVO)); cleanupFailedImportVM(userVm); - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm during publishing usage records")); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "VM import failed for Unmanaged VM during publishing Usage Records."); } try { if (!serviceOfferingVO.isDynamic()) { @@ -874,13 +861,13 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { userVm.getHypervisorType().toString(), VirtualMachine.class.getName(), userVm.getUuid(), userVm.isDisplayVm()); } } catch (Exception e) { - LOGGER.error(String.format("Failed to publish usage records during VM import for unmanaged vm %s", userVm.getInstanceName()), e); + LOGGER.error(String.format("Failed to publish usage records during VM import for unmanaged VM [%s] due to [%s].", userVm.getInstanceName(), e.getMessage()), e); cleanupFailedImportVM(userVm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm %s during publishing usage records", userVm.getInstanceName())); } resourceLimitService.incrementResourceCount(userVm.getAccountId(), Resource.ResourceType.user_vm, userVm.isDisplayVm()); - resourceLimitService.incrementResourceCount(userVm.getAccountId(), Resource.ResourceType.cpu, userVm.isDisplayVm(), new Long(serviceOfferingVO.getCpu())); - resourceLimitService.incrementResourceCount(userVm.getAccountId(), Resource.ResourceType.memory, userVm.isDisplayVm(), new Long(serviceOfferingVO.getRamSize())); + resourceLimitService.incrementResourceCount(userVm.getAccountId(), Resource.ResourceType.cpu, userVm.isDisplayVm(), Long.valueOf(serviceOfferingVO.getCpu())); + resourceLimitService.incrementResourceCount(userVm.getAccountId(), Resource.ResourceType.memory, userVm.isDisplayVm(), Long.valueOf(serviceOfferingVO.getRamSize())); // Save usage event and update resource count for user vm volumes List volumes = volumeDao.findByInstance(userVm.getId()); for (VolumeVO volume : volumes) { @@ -911,14 +898,17 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { final ServiceOfferingVO serviceOffering, final Map dataDiskOfferingMap, final Map nicNetworkMap, final Map callerNicIpAddressMap, final Map details, final boolean migrateAllowed, final boolean forced) { + LOGGER.debug(LogUtils.logGsonWithoutException("Trying to import VM [%s] with name [%s], in zone [%s], cluster [%s], and host [%s], using template [%s], service offering [%s], disks map [%s], NICs map [%s] and details [%s].", + unmanagedInstance, instanceName, zone, cluster, host, template, serviceOffering, dataDiskOfferingMap, nicNetworkMap, details)); UserVm userVm = null; ServiceOfferingVO validatedServiceOffering = null; try { validatedServiceOffering = getUnmanagedInstanceServiceOffering(unmanagedInstance, serviceOffering, owner, zone, details); } catch (Exception e) { - 
LOGGER.error("Service offering for VM import not compatible", e); - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to import VM: %s. %s", unmanagedInstance.getName(), StringUtils.defaultString(e.getMessage()))); + String errorMsg = String.format("Failed to import Unmanaged VM [%s] because the service offering [%s] is not compatible due to [%s].", unmanagedInstance.getName(), serviceOffering.getUuid(), StringUtils.defaultIfEmpty(e.getMessage(), "")); + LOGGER.error(errorMsg, e); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMsg); } String internalCSName = unmanagedInstance.getInternalCSName(); @@ -950,9 +940,9 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { } allDetails.put(VmDetailConstants.ROOT_DISK_CONTROLLER, rootDisk.getController()); try { - checkUnmanagedDiskAndOfferingForImport(rootDisk, null, validatedServiceOffering, owner, zone, cluster, migrateAllowed); + checkUnmanagedDiskAndOfferingForImport(unmanagedInstance.getName(), rootDisk, null, validatedServiceOffering, owner, zone, cluster, migrateAllowed); if (CollectionUtils.isNotEmpty(dataDisks)) { // Data disk(s) present - checkUnmanagedDiskAndOfferingForImport(dataDisks, dataDiskOfferingMap, owner, zone, cluster, migrateAllowed); + checkUnmanagedDiskAndOfferingForImport(unmanagedInstance.getName(), dataDisks, dataDiskOfferingMap, owner, zone, cluster, migrateAllowed); allDetails.put(VmDetailConstants.DATA_DISK_CONTROLLER, dataDisks.get(0).getController()); } resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.volume, unmanagedInstanceDisks.size()); @@ -962,7 +952,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { } // Check NICs and supplied networks Map nicIpAddressMap = getNicIpAddresses(unmanagedInstance.getNics(), callerNicIpAddressMap); - Map allNicNetworkMap = getUnmanagedNicNetworkMap(unmanagedInstance.getNics(), nicNetworkMap, nicIpAddressMap, zone, hostName, owner); + Map allNicNetworkMap = getUnmanagedNicNetworkMap(unmanagedInstance.getName(), unmanagedInstance.getNics(), nicNetworkMap, nicIpAddressMap, zone, hostName, owner); if (!CollectionUtils.isEmpty(unmanagedInstance.getNics())) { allDetails.put(VmDetailConstants.NIC_ADAPTER, unmanagedInstance.getNics().get(0).getAdapterType()); } @@ -976,8 +966,9 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { validatedServiceOffering, null, hostName, cluster.getHypervisorType(), allDetails, powerState); } catch (InsufficientCapacityException ice) { - LOGGER.error(String.format("Failed to import vm name: %s", instanceName), ice); - throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ice.getMessage()); + String errorMsg = String.format("Failed to import VM [%s] due to [%s].", instanceName, ice.getMessage()); + LOGGER.error(errorMsg, ice); + throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, errorMsg); } if (userVm == null) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to import vm name: %s", instanceName)); @@ -1035,23 +1026,29 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { return userVm; } - @Override - public ListResponse listUnmanagedInstances(ListUnmanagedInstancesCmd cmd) { + private Cluster basicAccessChecks(Long clusterId) { final Account caller = CallContext.current().getCallingAccount(); if (caller.getType() != Account.Type.ADMIN) { - throw new PermissionDeniedException(String.format("Cannot perform this operation, Calling account 
is not root admin: %s", caller.getUuid())); + throw new PermissionDeniedException(String.format("Cannot perform this operation, caller account [%s] is not ROOT Admin.", caller.getUuid())); } - final Long clusterId = cmd.getClusterId(); if (clusterId == null) { - throw new InvalidParameterValueException(String.format("Cluster ID cannot be null")); + throw new InvalidParameterValueException("Cluster ID cannot be null."); } final Cluster cluster = clusterDao.findById(clusterId); if (cluster == null) { - throw new InvalidParameterValueException(String.format("Cluster ID: %d cannot be found", clusterId)); + throw new InvalidParameterValueException(String.format("Cluster with ID [%d] cannot be found.", clusterId)); } if (cluster.getHypervisorType() != Hypervisor.HypervisorType.VMware) { - throw new InvalidParameterValueException(String.format("VM ingestion is currently not supported for hypervisor: %s", cluster.getHypervisorType().toString())); + throw new InvalidParameterValueException(String.format("VM import is currently not supported for hypervisor [%s].", cluster.getHypervisorType().toString())); } + return cluster; + } + + @Override + public ListResponse listUnmanagedInstances(ListUnmanagedInstancesCmd cmd) { + Long clusterId = cmd.getClusterId(); + Cluster cluster = basicAccessChecks(clusterId); + String keyword = cmd.getKeyword(); if (StringUtils.isNotEmpty(keyword)) { keyword = keyword.toLowerCase(); @@ -1093,25 +1090,13 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { @Override public UserVmResponse importUnmanagedInstance(ImportUnmanagedInstanceCmd cmd) { - final Account caller = CallContext.current().getCallingAccount(); - if (caller.getType() != Account.Type.ADMIN) { - throw new PermissionDeniedException(String.format("Cannot perform this operation, Calling account is not root admin: %s", caller.getUuid())); - } - final Long clusterId = cmd.getClusterId(); - if (clusterId == null) { - throw new InvalidParameterValueException(String.format("Cluster ID cannot be null")); - } - final Cluster cluster = clusterDao.findById(clusterId); - if (cluster == null) { - throw new InvalidParameterValueException(String.format("Cluster ID: %d cannot be found", clusterId)); - } - if (cluster.getHypervisorType() != Hypervisor.HypervisorType.VMware) { - throw new InvalidParameterValueException(String.format("VM import is currently not supported for hypervisor: %s", cluster.getHypervisorType().toString())); - } + Long clusterId = cmd.getClusterId(); + Cluster cluster = basicAccessChecks(clusterId); + final DataCenter zone = dataCenterDao.findById(cluster.getDataCenterId()); final String instanceName = cmd.getName(); if (StringUtils.isEmpty(instanceName)) { - throw new InvalidParameterValueException(String.format("Instance name cannot be empty")); + throw new InvalidParameterValueException("Instance name cannot be empty"); } if (cmd.getDomainId() != null && StringUtils.isEmpty(cmd.getAccountName())) { throw new InvalidParameterValueException("domainid parameter must be specified with account parameter"); @@ -1140,7 +1125,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { } final Long serviceOfferingId = cmd.getServiceOfferingId(); if (serviceOfferingId == null) { - throw new InvalidParameterValueException(String.format("Service offering ID cannot be null")); + throw new InvalidParameterValueException("Service offering ID cannot be null"); } final ServiceOfferingVO serviceOffering = serviceOfferingDao.findById(serviceOfferingId); if (serviceOffering == null) 
{ @@ -1160,7 +1145,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { String hostName = cmd.getHostName(); if (StringUtils.isEmpty(hostName)) { if (!NetUtils.verifyDomainNameLabel(instanceName, true)) { - throw new InvalidParameterValueException(String.format("Please provide hostname for the VM. VM name contains unsupported characters for it to be used as hostname")); + throw new InvalidParameterValueException("Please provide a valid hostname for the VM. VM name contains unsupported characters that cannot be used as hostname."); } hostName = instanceName; } @@ -1232,7 +1217,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { template.setGuestOSId(guestOSHypervisor.getGuestOsId()); } userVm = importVirtualMachineInternal(unmanagedInstance, instanceName, zone, cluster, host, - template, displayName, hostName, caller, owner, userId, + template, displayName, hostName, CallContext.current().getCallingAccount(), owner, userId, serviceOffering, dataDiskOfferingMap, nicNetworkMap, nicIpAddressMap, details, cmd.getMigrateAllowed(), forced); @@ -1316,8 +1301,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { } if (hostId == null) { - throw new CloudRuntimeException("Cannot find a host to verify if the VM to unmanage " + - "with id = " + vmVO.getUuid() + " exists."); + throw new CloudRuntimeException(String.format("Cannot find a host to verify if the VM [%s] exists. Thus we are unable to unmanage it.", vmVO.getUuid())); } return hostId; } diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index e16926e76dc..e47e5a67b4a 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -159,6 +159,7 @@ import com.cloud.storage.template.TemplateProp; import com.cloud.storage.template.VhdProcessor; import com.cloud.storage.template.VmdkProcessor; import com.cloud.utils.EncryptionUtil; +import com.cloud.utils.LogUtils; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.SwiftUtil; @@ -272,6 +273,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S @Override public Answer executeRequest(Command cmd) { + s_logger.debug(LogUtils.logGsonWithoutException("Executing command %s [%s].", cmd.getClass().getSimpleName(), cmd)); if (cmd instanceof DownloadProgressCommand) { return _dlMgr.handleDownloadCommand(this, (DownloadProgressCommand)cmd); } else if (cmd instanceof DownloadCommand) { @@ -406,13 +408,17 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S NfsTO nfsImageStore = (NfsTO)srcStore; String secondaryStorageUrl = nfsImageStore.getUrl(); assert (secondaryStorageUrl != null); + String templateUrl = secondaryStorageUrl + File.separator + srcData.getPath(); + String templateDetails = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(template, "uuid", "path", "name"); + s_logger.debug(String.format("Trying to get disks of template [%s], using path [%s].", templateDetails, templateUrl)); + Pair templateInfo = decodeTemplateRelativePathAndNameFromUrl(secondaryStorageUrl, templateUrl, template.getName()); String templateRelativeFolderPath = 
templateInfo.first(); try { String secondaryMountPoint = getRootDir(secondaryStorageUrl, _nfsVersion); - s_logger.info("MDOVE Secondary storage mount point: " + secondaryMountPoint); + s_logger.info(String.format("Trying to find template [%s] in secondary storage root mount point [%s].", templateDetails, secondaryMountPoint)); String srcOVAFileName = getTemplateOnSecStorageFilePath(secondaryMountPoint, templateRelativeFolderPath, templateInfo.second(), ImageFormat.OVA.getFileExtension()); @@ -423,39 +429,46 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S command.add("--no-same-permissions"); command.add("-xf", srcOVAFileName); command.setWorkDir(secondaryMountPoint + File.separator + templateRelativeFolderPath); - s_logger.info("Executing command: " + command.toString()); + + s_logger.info(String.format("Trying to decompress OVA file [%s] using command [%s].", srcOVAFileName, command.toString())); String result = command.execute(); if (result != null) { - String msg = "Unable to unpack snapshot OVA file at: " + srcOVAFileName; + String msg = String.format("Unable to unpack snapshot OVA file [%s] due to [%s].", srcOVAFileName, result); s_logger.error(msg); throw new Exception(msg); } + String directory = secondaryMountPoint + File.separator + templateRelativeFolderPath; command = new Script("chmod", 0, s_logger); command.add("-R"); - command.add("666", secondaryMountPoint + File.separator + templateRelativeFolderPath); + command.add("666", directory); + + s_logger.debug(String.format("Trying to add, recursively, permission 666 to directory [%s] using command [%s].", directory, command.toString())); result = command.execute(); if (result != null) { - s_logger.warn("Unable to set permissions for " + secondaryMountPoint + File.separator + templateRelativeFolderPath + " due to " + result); + s_logger.warn(String.format("Unable to set permissions 666 for directory [%s] due to [%s].", directory, result)); } } Script command = new Script("cp", _timeout, s_logger); command.add(ovfFilePath); command.add(ovfFilePath + ORIGINAL_FILE_EXTENSION); + s_logger.debug(String.format("Trying to copy file from [%s] to [%s] using command [%s].", ovfFilePath, ovfFilePath + ORIGINAL_FILE_EXTENSION, command.toString())); String result = command.execute(); if (result != null) { - String msg = "Unable to rename original OVF, error msg: " + result; + String msg = String.format("Unable to copy original OVF file [%s] to [%s] due to [%s].", ovfFilePath, ovfFilePath + ORIGINAL_FILE_EXTENSION, result); s_logger.error(msg); } - s_logger.debug("Reading OVF " + ovfFilePath + " to retrive the number of disks present in OVA"); + s_logger.debug(String.format("Reading OVF file [%s] to retrieve the number of disks present in OVA file.", ovfFilePath)); OVFHelper ovfHelper = new OVFHelper(); List disks = ovfHelper.getOVFVolumeInfoFromFile(ovfFilePath, configurationId); + s_logger.debug(LogUtils.logGsonWithoutException("Found %s disks reading OVF file [%s] and using configuration id [%s]. 
The disks specifications are [%s].", + disks.size(), ovfFilePath, configurationId, disks)); return new GetDatadisksAnswer(disks); } catch (Exception e) { - String msg = "Get Datadisk Template Count failed due to " + e.getMessage(); + String msg = String.format("Failed to get disks from template [%s] due to [%s].", templateDetails, e.getMessage()); s_logger.error(msg, e); return new GetDatadisksAnswer(msg); } @@ -584,7 +597,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S * Template url may or may not end with .ova extension */ public static Pair decodeTemplateRelativePathAndNameFromUrl(String storeUrl, String templateUrl, String defaultName) { - + s_logger.debug(String.format("Trying to get template relative path and name from URL [%s].", templateUrl)); String templateName = null; String mountPoint = null; if (templateUrl.endsWith(".ova")) { @@ -598,6 +611,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S templateName = templateUrl.substring(index + 1).replace(".ova", ""); if (templateName == null || templateName.isEmpty()) { + s_logger.debug(String.format("Cannot find template name from URL [%s]. Using default name [%s].", templateUrl, defaultName)); templateName = defaultName; } } else { @@ -608,11 +622,13 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S templateName = defaultName; } + s_logger.debug(String.format("Template relative path [%s] and name [%s] found from URL [%s].", mountPoint, templateName, templateUrl)); return new Pair(mountPoint, templateName); } public static String getTemplateOnSecStorageFilePath(String secStorageMountPoint, String templateRelativeFolderPath, String templateName, String fileExtension) { - + s_logger.debug(String.format("Trying to find template [%s] with file extension [%s] in secondary storage mount point [%s] using relative folder path [%s].", + templateName, fileExtension, secStorageMountPoint, templateRelativeFolderPath)); StringBuffer sb = new StringBuffer(); sb.append(secStorageMountPoint); if (!secStorageMountPoint.endsWith("/")) { @@ -699,17 +715,27 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S } private String getOVFFilePath(String srcOVAFileName) { + s_logger.debug(String.format("Trying to get OVF file from OVA path [%s].", srcOVAFileName)); + File file = new File(srcOVAFileName); assert (_storage != null); String[] files = _storage.listFiles(file.getParent()); - if (files != null) { - for (String fileName : files) { - if (fileName.toLowerCase().endsWith(".ovf")) { - File ovfFile = new File(fileName); - return file.getParent() + File.separator + ovfFile.getName(); - } + + if (files == null) { + s_logger.warn(String.format("Cannot find any files in parent directory [%s] of OVA file [%s].", file.getParent(), srcOVAFileName)); + return null; + } + + s_logger.debug(String.format("Found [%s] files in parent directory of OVA file [%s]. 
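getOVFFilePath above scans the OVA's parent directory for a .ovf entry. The same lookup with only the JDK; the sample path in main is hypothetical and error handling is reduced to returning null:

    import java.io.File;

    public class OvfLookupSketch {

        /** Returns the path of the first .ovf file next to the given OVA, or null when none is found. */
        static String getOvfFilePath(String srcOvaFileName) {
            File ova = new File(srcOvaFileName);
            String[] files = ova.getParentFile() == null ? null : ova.getParentFile().list();
            if (files == null) {
                return null;
            }
            for (String fileName : files) {
                if (fileName.toLowerCase().endsWith(".ovf")) {
                    return ova.getParent() + File.separator + fileName;
                }
            }
            return null;
        }

        public static void main(String[] args) {
            // Prints null unless such a directory actually exists on this machine
            System.out.println(getOvfFilePath("/mnt/secondary/template/tmpl/2/201/template.ova"));
        }
    }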
Files found are [%s].", files.length + 1, file.getParent(), StringUtils.join(files, ", "))); + for (String fileName : files) { + if (fileName.toLowerCase().endsWith(".ovf")) { + File ovfFile = new File(fileName); + String ovfFilePath = file.getParent() + File.separator + ovfFile.getName(); + s_logger.debug(String.format("Found OVF file [%s] from OVA file [%s].", ovfFilePath, srcOVAFileName)); + return ovfFilePath; } } + s_logger.warn(String.format("Cannot find any OVF file in parent directory [%s] of OVA file [%s].", file.getParent(), srcOVAFileName)); return null; } @@ -2050,6 +2076,13 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S s_logger.warn(details); return new Answer(cmd, false, details); } + + // delete the directory if it is empty + if (snapshotDir.isDirectory() && snapshotDir.list().length == 0 && !snapshotDir.delete()) { + details = String.format("Unable to delete directory [%s] at path [%s].", snapshotDir.getName(), snapshotPath); + s_logger.debug(details); + return new Answer(cmd, false, details); + } return new Answer(cmd, true, null); } else if (dstore instanceof S3TO) { final S3TO s3 = (S3TO)dstore; @@ -2616,13 +2649,13 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S return _parent; } try { + s_logger.debug(String.format("Trying to get root directory from secondary storage URL [%s] using NFS version [%s].", secUrl, nfsVersion)); URI uri = new URI(secUrl); String dir = mountUri(uri, nfsVersion); return _parent + "/" + dir; } catch (Exception e) { - String msg = "GetRootDir for " + secUrl + " failed due to " + e.toString(); - s_logger.error(msg, e); - throw new CloudRuntimeException(msg); + String msg = String.format("Failed to get root directory from secondary storage URL [%s], using NFS version [%s], due to [%s].", secUrl, nfsVersion, e.getMessage()); + throw new CloudRuntimeException(msg, e); } } diff --git a/systemvm/debian/opt/cloud/bin/cs/CsAddress.py b/systemvm/debian/opt/cloud/bin/cs/CsAddress.py index 7c7f5e48d64..a8634a75ae3 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsAddress.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsAddress.py @@ -668,8 +668,12 @@ class CsIP: logging.info("Not making dns publicly available") if self.config.has_metadata(): - app = CsApache(self) - app.setup() + if method == "add": + app = CsApache(self) + app.setup() + elif method == "delete": + app = CsApache(self) + app.remove() # If redundant then this is dealt with # by the primary backup functions diff --git a/systemvm/debian/opt/cloud/bin/cs/CsApp.py b/systemvm/debian/opt/cloud/bin/cs/CsApp.py index d8b3223f017..123171a09c0 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsApp.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsApp.py @@ -34,7 +34,7 @@ class CsApache(CsApp): """ Set up Apache """ def remove(self): - file = "/etc/apache2/sites-enabled/vhost-%s.conf" % self.dev + file = "/etc/apache2/sites-enabled/vhost-%s.conf" % self.ip if os.path.isfile(file): os.remove(file) CsHelper.service("apache2", "restart") diff --git a/test/integration/smoke/test_ssvm.py b/test/integration/smoke/test_ssvm.py index cdbc7d2f58e..ad03c3d46e1 100644 --- a/test/integration/smoke/test_ssvm.py +++ b/test/integration/smoke/test_ssvm.py @@ -121,7 +121,7 @@ class TestSSVMs(cloudstackTestCase): # should return only ONE SSVM per zone # 2. The returned SSVM should be in Running state # 3. listSystemVM for secondarystoragevm should list publicip, - # privateip and link-localip + # privateip, link-localip and service offering id/name # 4. 
The gateway programmed on the ssvm by listSystemVm should be # the same as the gateway returned by listVlanIpRanges # 5. DNS entries must match those given for the zone @@ -188,6 +188,18 @@ class TestSSVMs(cloudstackTestCase): "Check whether SSVM has public IP field" ) + self.assertEqual( + hasattr(ssvm, 'serviceofferingid'), + True, + "Check whether SSVM has service offering id field" + ) + + self.assertEqual( + hasattr(ssvm, 'serviceofferingname'), + True, + "Check whether SSVM has service offering name field" + ) + # Fetch corresponding ip ranges information from listVlanIpRanges ipranges_response = list_vlan_ipranges( self.apiclient, @@ -261,8 +273,8 @@ class TestSSVMs(cloudstackTestCase): # 1. listSystemVM (systemvmtype=consoleproxy) should return # at least ONE CPVM per zone # 2. The returned ConsoleProxyVM should be in Running state - # 3. listSystemVM for console proxy should list publicip, privateip - # and link-localip + # 3. listSystemVM for console proxy should list publicip, privateip, + # link-localip and service offering id/name # 4. The gateway programmed on the console proxy should be the same # as the gateway returned by listZones # 5. DNS entries must match those given for the zone @@ -327,6 +339,18 @@ class TestSSVMs(cloudstackTestCase): True, "Check whether CPVM has public IP field" ) + + self.assertEqual( + hasattr(cpvm, 'serviceofferingid'), + True, + "Check whether CPVM has service offering id field" + ) + + self.assertEqual( + hasattr(cpvm, 'serviceofferingname'), + True, + "Check whether CPVM has service offering name field" + ) # Fetch corresponding ip ranges information from listVlanIpRanges ipranges_response = list_vlan_ipranges( self.apiclient, diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index 63bef1240c8..88b94acf49b 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -517,6 +517,7 @@ "label.copy.clipboard": "Copy to clipboard", "label.copy.consoleurl": "Copy console URL to clipboard", "label.copyid": "Copy ID", +"label.copy.password": "Copy password", "label.core": "Core", "label.core.zone.type": "Core zone type", "label.counter": "Counter", diff --git a/ui/src/components/page/GlobalLayout.vue b/ui/src/components/page/GlobalLayout.vue index d807525068e..7a26599a2ba 100644 --- a/ui/src/components/page/GlobalLayout.vue +++ b/ui/src/components/page/GlobalLayout.vue @@ -199,7 +199,8 @@ export default { created () { this.menus = this.mainMenu.find((item) => item.path === '/').children this.collapsed = !this.sidebarOpened - setInterval(this.checkShutdown, 5000) + const readyForShutdownPollingJob = setInterval(this.checkShutdown, 5000) + this.$store.commit('SET_READY_FOR_SHUTDOWN_POLLING_JOB', readyForShutdownPollingJob) }, mounted () { const layoutMode = this.$config.theme['@layout-mode'] || 'light' diff --git a/ui/src/components/view/InfoCard.vue b/ui/src/components/view/InfoCard.vue index 550bba74409..90952b0ea42 100644 --- a/ui/src/components/view/InfoCard.vue +++ b/ui/src/components/view/InfoCard.vue @@ -523,7 +523,7 @@
{{ $t('label.serviceofferingname') }}
- {{ resource.serviceofferingname || resource.serviceofferingid }} + {{ resource.serviceofferingname || resource.serviceofferingid }} {{ resource.serviceofferingname || resource.serviceofferingid }} {{ resource.serviceofferingname || resource.serviceofferingid }}
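Note on the InfoCard.vue hunk above: the template keeps the existing fallback of rendering the offering ID whenever the offering name is absent, now fed by the serviceofferingid and serviceofferingname fields added to SystemVmResponse. A minimal JavaScript sketch of that fallback, where the vm object is an illustrative stand-in for one entry of a listSystemVms response rather than actual UI code:

// Illustrative sketch only; mirrors the fallback expression used in InfoCard.vue.
// `vm` stands in for a single system VM entry returned by listSystemVms.
function serviceOfferingLabel (vm) {
  // Prefer the human-readable offering name; fall back to the offering UUID.
  return vm.serviceofferingname || vm.serviceofferingid || ''
}

// Hypothetical usage with made-up values:
const ssvm = { serviceofferingid: '1c7b2b42-0c24-4f14-91f1-6e2a1c9c1c1e', serviceofferingname: 'System Offering For Secondary Storage VM' }
serviceOfferingLabel(ssvm)                                           // "System Offering For Secondary Storage VM"
serviceOfferingLabel({ serviceofferingid: ssvm.serviceofferingid })  // falls back to the UUID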
diff --git a/ui/src/config/section/compute.js b/ui/src/config/section/compute.js index 6602ecb884c..22ee2b007f5 100644 --- a/ui/src/config/section/compute.js +++ b/ui/src/config/section/compute.js @@ -370,7 +370,13 @@ export default { message: 'message.action.instance.reset.password', dataView: true, show: (record) => { return ['Stopped'].includes(record.state) && record.passwordenabled }, - response: (result) => { return result.virtualmachine && result.virtualmachine.password ? `The password of VM ${result.virtualmachine.displayname} is ${result.virtualmachine.password}` : null } + response: (result) => { + return { + message: result.virtualmachine && result.virtualmachine.password ? `The password of VM ${result.virtualmachine.displayname} is ${result.virtualmachine.password}` : null, + copybuttontext: result.virtualmachine && result.virtualmachine.password ? 'label.copy.password' : null, + copytext: result.virtualmachine && result.virtualmachine.password ? result.virtualmachine.password : null + } + } }, { api: 'resetSSHKeyForVirtualMachine',
diff --git a/ui/src/store/getters.js b/ui/src/store/getters.js index b0c4ecdfccd..67b168be8c2 100644 --- a/ui/src/store/getters.js +++ b/ui/src/store/getters.js @@ -50,7 +50,8 @@ const getters = { twoFaIssuer: state => state.user.twoFaIssuer, loginFlag: state => state.user.loginFlag, allProjects: (state) => state.app.allProjects, - customHypervisorName: state => state.user.customHypervisorName + customHypervisorName: state => state.user.customHypervisorName, + readyForShutdownPollingJob: state => state.user.readyForShutdownPollingJob } export default getters
diff --git a/ui/src/store/modules/app.js b/ui/src/store/modules/app.js index b3130b68d61..cf2b34e4b8e 100644 --- a/ui/src/store/modules/app.js +++ b/ui/src/store/modules/app.js @@ -130,6 +130,9 @@ const app = { }, SET_SHUTDOWN_TRIGGERED: (state, shutdownTriggered) => { state.shutdownTriggered = shutdownTriggered + }, + SET_READY_FOR_SHUTDOWN_POLLING_JOB: (state, readyForShutdownPollingJob) => { + state.readyForShutdownPollingJob = readyForShutdownPollingJob } }, actions: { @@ -192,6 +195,9 @@ const app = { }, SetShutdownTriggered ({ commit }, bool) { commit('SET_SHUTDOWN_TRIGGERED', bool) + }, + SetReadyForShutdownPollingJob ({ commit }, job) { + commit('SET_READY_FOR_SHUTDOWN_POLLING_JOB', job) } } }
diff --git a/ui/src/store/modules/user.js b/ui/src/store/modules/user.js index 6a3ba217baf..0e45ac7e676 100644 --- a/ui/src/store/modules/user.js +++ b/ui/src/store/modules/user.js @@ -65,7 +65,8 @@ const user = { twoFaEnabled: false, twoFaProvider: '', twoFaIssuer: '', - customHypervisorName: 'Custom' + customHypervisorName: 'Custom', + readyForShutdownPollingJob: '' }, mutations: { @@ -155,6 +156,9 @@ const user = { }, SET_CUSTOM_HYPERVISOR_NAME (state, name) { state.customHypervisorName = name + }, + SET_READY_FOR_SHUTDOWN_POLLING_JOB: (state, job) => { + state.readyForShutdownPollingJob = job } },
diff --git a/ui/src/views/AutogenView.vue b/ui/src/views/AutogenView.vue index ba1b87042b7..fa33be8d1b0 100644 --- a/ui/src/views/AutogenView.vue +++ b/ui/src/views/AutogenView.vue @@ -444,7 +444,8 @@
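Note on the UI store changes above: GlobalLayout.vue commits the handle returned by setInterval to the store (SET_READY_FOR_SHUTDOWN_POLLING_JOB, exposed through the readyForShutdownPollingJob getter) so the readyForShutdown poller can later be cleared from elsewhere in the application. A small self-contained sketch of that pattern, assuming only the mutation name from the diff; the store object, startReadyForShutdownPolling, stopReadyForShutdownPolling and checkShutdown below are illustrative, not CloudStack code:

// Illustrative sketch, not the actual CloudStack Vuex store.
// The interval handle returned by setInterval is kept in shared state so the
// poller can be cleared from anywhere once it is no longer needed.
const store = {
  state: { readyForShutdownPollingJob: null },
  commit (mutation, payload) {
    if (mutation === 'SET_READY_FOR_SHUTDOWN_POLLING_JOB') {
      this.state.readyForShutdownPollingJob = payload
    }
  }
}

// As in GlobalLayout.vue's created() hook: start polling and remember the handle.
function startReadyForShutdownPolling (checkShutdown) {
  const job = setInterval(checkShutdown, 5000)
  store.commit('SET_READY_FOR_SHUTDOWN_POLLING_JOB', job)
}

// Later, for example once shutdown has actually been triggered, the stored
// handle is what allows the interval to be cleared again.
function stopReadyForShutdownPolling () {
  const job = store.state.readyForShutdownPollingJob
  if (job) {
    clearInterval(job)
    store.commit('SET_READY_FOR_SHUTDOWN_POLLING_JOB', null)
  }
}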