Merge branch 'main' of https://github.com/apache/cloudstack into nsx-integration

This commit is contained in:
Pearl Dsilva 2023-10-03 07:32:12 -04:00
commit e1d56d0a62
70 changed files with 781 additions and 525 deletions

View File

@ -40,6 +40,8 @@ import java.util.concurrent.atomic.AtomicInteger;
import javax.naming.ConfigurationException;
import com.cloud.resource.AgentStatusUpdater;
import com.cloud.resource.ResourceStatusUpdater;
import com.cloud.utils.NumbersUtil;
import org.apache.cloudstack.agent.lb.SetupMSListAnswer;
import org.apache.cloudstack.agent.lb.SetupMSListCommand;
@ -100,7 +102,7 @@ import com.cloud.utils.script.Script;
* For more configuration options, see the individual types.
*
**/
public class Agent implements HandlerFactory, IAgentControl {
public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater {
protected static Logger s_logger = Logger.getLogger(Agent.class);
public enum ExitStatus {
@ -409,6 +411,20 @@ public class Agent implements HandlerFactory, IAgentControl {
}
}
/**
 * Sends an unsolicited (out-of-band) ping to the management server so the
 * agent's current status is reported immediately rather than waiting for
 * the next scheduled ping interval. Called back by the resource via
 * {@code AgentStatusUpdater} registration.
 */
public void triggerUpdate() {
    PingCommand command = _resource.getCurrentStatus(getId());
    if (command == null) {
        // Resource could not produce a status report; nothing to send.
        s_logger.warn("Resource returned no current status; skipping out of band ping");
        return;
    }
    command.setOutOfBand(true);
    s_logger.debug("Sending out of band ping");
    // Sequence -1 is replaced below; "false" means no answer is expected.
    final Request request = new Request(_id, -1, command, false);
    request.setSequence(getNextSequence());
    try {
        _link.send(request.toBytes());
    } catch (final ClosedChannelException e) {
        // Include the exception so the closed-channel cause shows up in the log.
        s_logger.warn("Unable to send ping update: " + request.toString(), e);
    }
}
protected void cancelTasks() {
synchronized (_watchList) {
for (final WatchTask task : _watchList) {
@ -461,6 +477,10 @@ public class Agent implements HandlerFactory, IAgentControl {
} catch (final ClosedChannelException e) {
s_logger.warn("Unable to send request: " + request.toString());
}
if (_resource instanceof ResourceStatusUpdater) {
((ResourceStatusUpdater) _resource).registerStatusUpdater(this);
}
}
}

View File

@ -30,7 +30,7 @@ public interface Account extends ControlledEntity, InternalIdentity, Identity {
* Account states.
* */
enum State {
DISABLED, ENABLED, LOCKED;
DISABLED, ENABLED, LOCKED, REMOVED;
/**
* The toString method was overridden to maintain consistency in the DB, as the GenericDaoBase uses toString in the enum value to make the sql statements

View File

@ -16,6 +16,7 @@
// under the License.
package org.apache.cloudstack.api.command.admin.config;
import com.cloud.utils.crypt.DBEncryptionUtil;
import org.apache.cloudstack.acl.RoleService;
import org.apache.cloudstack.api.response.DomainResponse;
import org.apache.log4j.Logger;
@ -150,25 +151,50 @@ public class UpdateCfgCmd extends BaseCmd {
if (cfg != null) {
ConfigurationResponse response = _responseGenerator.createConfigurationResponse(cfg);
response.setResponseName(getCommandName());
if (getZoneId() != null) {
response.setScope("zone");
}
if (getClusterId() != null) {
response.setScope("cluster");
}
if (getStoragepoolId() != null) {
response.setScope("storagepool");
}
if (getAccountId() != null) {
response.setScope("account");
}
if (getDomainId() != null) {
response.setScope("domain");
}
response.setValue(value);
response = setResponseScopes(response);
response = setResponseValue(response, cfg);
this.setResponseObject(response);
} else {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to update config");
}
}
/**
 * Copies this command's value into the response. When the configuration
 * {@code cfg} is marked encrypted (it belongs to the `Hidden` or `Secure`
 * categories), the value is encrypted before being placed in the response.
 *
 * @param response the response whose value field is populated
 * @param cfg the configuration whose encryption flag decides the handling
 * @return the same response instance, with its value set
 */
public ConfigurationResponse setResponseValue(ConfigurationResponse response, Configuration cfg) {
    final String responseValue = cfg.isEncrypted() ? DBEncryptionUtil.encrypt(getValue()) : getValue();
    response.setValue(responseValue);
    return response;
}
/**
 * Sets the scope for the Configuration response only if the field is not null.
 * Checks run in order: zone, cluster, storagepool, account, domain. If more than
 * one ID is supplied, the later check overwrites the earlier scope, so domain has
 * the highest effective precedence — NOTE(review): assumes setScope simply
 * replaces the previous value; confirm against ConfigurationResponse.
 * @param response to be updated
 * @return the response updated with the scopes
 */
public ConfigurationResponse setResponseScopes(ConfigurationResponse response) {
if (getZoneId() != null) {
response.setScope("zone");
}
if (getClusterId() != null) {
response.setScope("cluster");
}
if (getStoragepoolId() != null) {
response.setScope("storagepool");
}
if (getAccountId() != null) {
response.setScope("account");
}
if (getDomainId() != null) {
response.setScope("domain");
}
return response;
}
}

View File

@ -178,6 +178,14 @@ public class SystemVmResponse extends BaseResponseWithAnnotations {
@Param(description = "true if vm contains XS/VMWare tools inorder to support dynamic scaling of VM cpu/memory.")
private Boolean isDynamicallyScalable;
@SerializedName(ApiConstants.SERVICE_OFFERING_ID)
@Param(description = "the ID of the service offering of the system virtual machine.")
private String serviceOfferingId;
@SerializedName("serviceofferingname")
@Param(description = "the name of the service offering of the system virtual machine.")
private String serviceOfferingName;
@Override
public String getObjectId() {
return this.getId();
@ -466,4 +474,20 @@ public class SystemVmResponse extends BaseResponseWithAnnotations {
public void setDynamicallyScalable(Boolean dynamicallyScalable) {
isDynamicallyScalable = dynamicallyScalable;
}
/**
 * @return the ID of the service offering of the system virtual machine
 */
public String getServiceOfferingId() {
return serviceOfferingId;
}
/**
 * Sets the ID of the service offering of the system virtual machine.
 */
public void setServiceOfferingId(String serviceOfferingId) {
this.serviceOfferingId = serviceOfferingId;
}
/**
 * @return the name of the service offering of the system virtual machine
 */
public String getServiceOfferingName() {
return serviceOfferingName;
}
/**
 * Sets the name of the service offering of the system virtual machine.
 */
public void setServiceOfferingName(String serviceOfferingName) {
this.serviceOfferingName = serviceOfferingName;
}
}

View File

@ -422,11 +422,6 @@
<artifactId>cloud-engine-components-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine-network</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine-orchestration</artifactId>

View File

@ -24,6 +24,7 @@ import com.cloud.host.Host;
public class PingCommand extends Command {
Host.Type hostType;
long hostId;
boolean outOfBand;
protected PingCommand() {
}
@ -33,6 +34,12 @@ public class PingCommand extends Command {
hostId = id;
}
/**
 * Creates a ping command for the given host, optionally flagged as an
 * out-of-band (unsolicited) status update.
 *
 * @param type the type of the host sending the ping
 * @param id   the ID of the host sending the ping
 * @param oob  true when this ping is sent out of band
 */
public PingCommand(Host.Type type, long id, boolean oob) {
    this.hostType = type;
    this.hostId = id;
    this.outOfBand = oob;
}
public Host.Type getHostType() {
return hostType;
}
@ -41,6 +48,10 @@ public class PingCommand extends Command {
return hostId;
}
/**
 * @return true when this ping was sent out of band rather than on the
 *         regular ping schedule
 */
public boolean getOutOfBand() {
    return outOfBand;
}

/**
 * Marks whether this ping is an out-of-band (unsolicited) update.
 */
public void setOutOfBand(boolean oob) {
    this.outOfBand = oob;
}
@Override
public boolean executeInSequence() {
return false;

View File

@ -0,0 +1,27 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.resource;
/**
 * AgentStatusUpdater is an agent with triggerable update functionality.
 * A resource implementing {@link ResourceStatusUpdater} holds a reference to
 * one of these so it can push an out-of-band status report (ping) to the
 * management server without waiting for the regular ping interval.
 */
public interface AgentStatusUpdater {
/**
 * Trigger the sending of an update (Ping) immediately, out of band.
 */
void triggerUpdate();
}

View File

@ -0,0 +1,29 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.resource;
/**
 * ResourceStatusUpdater is a resource that can trigger out of band status
 * updates. The agent registers itself with such a resource at connection
 * time; the resource then calls back into the updater whenever it has a
 * status change worth reporting immediately.
 */
public interface ResourceStatusUpdater {
/**
 * Register an AgentStatusUpdater to use for triggering out of band updates.
 *
 * @param updater The object to call triggerUpdate() on
 */
void registerStatusUpdater(AgentStatusUpdater updater);
}

View File

@ -25,6 +25,9 @@ public class CheckUrlCommand extends Command {
private String format;
private String url;
private Integer connectTimeout;
private Integer connectionRequestTimeout;
private Integer socketTimeout;
public String getFormat() {
return format;
@ -34,12 +37,27 @@ public class CheckUrlCommand extends Command {
return url;
}
public Integer getConnectTimeout() { return connectTimeout; }
public Integer getConnectionRequestTimeout() { return connectionRequestTimeout; }
public Integer getSocketTimeout() { return socketTimeout; }
/**
 * Creates a command to check the reachability of {@code url}, leaving all
 * timeouts null so the downloader defaults apply.
 *
 * @param format the template/volume format of the resource at the URL
 * @param url    the URL whose reachability is to be checked
 */
public CheckUrlCommand(final String format,final String url) {
    // Chain to the full constructor instead of duplicating field initialization.
    this(format, url, null, null, null);
}
/**
 * Creates a command to check the reachability of {@code url} with explicit
 * HTTP timeout settings. Any {@code null} timeout falls back to the
 * downloader's default.
 *
 * @param format                   the template/volume format of the resource at the URL
 * @param url                      the URL whose reachability is to be checked
 * @param connectTimeout           TCP connect timeout, or null for the default
 * @param connectionRequestTimeout timeout for obtaining a connection from the pool, or null
 * @param socketTimeout            socket read timeout, or null for the default
 */
public CheckUrlCommand(final String format,final String url, Integer connectTimeout, Integer connectionRequestTimeout, Integer socketTimeout) {
    super();
    this.format = format;
    this.url = url;
    this.connectTimeout = connectTimeout;
    this.connectionRequestTimeout = connectionRequestTimeout;
    this.socketTimeout = socketTimeout;
}
@Override
public boolean executeInSequence() {
return false;

View File

@ -54,7 +54,7 @@ public class DirectDownloadHelper {
public static boolean checkUrlExistence(String url) {
try {
DirectTemplateDownloader checker = getCheckerDownloader(url);
DirectTemplateDownloader checker = getCheckerDownloader(url, null, null, null);
return checker.checkUrl(url);
} catch (CloudRuntimeException e) {
LOGGER.error(String.format("Cannot check URL %s is reachable due to: %s", url, e.getMessage()), e);
@ -62,22 +62,37 @@ public class DirectDownloadHelper {
}
}
private static DirectTemplateDownloader getCheckerDownloader(String url) {
/**
 * Checks whether {@code url} is reachable, honouring the supplied HTTP
 * timeouts (each may be null to use the downloader's default).
 *
 * @return true when the URL responds; false when the check fails or no
 *         checker exists for the URL scheme
 */
public static boolean checkUrlExistence(String url, Integer connectTimeout, Integer connectionRequestTimeout, Integer socketTimeout) {
    try {
        return getCheckerDownloader(url, connectTimeout, connectionRequestTimeout, socketTimeout).checkUrl(url);
    } catch (CloudRuntimeException e) {
        LOGGER.error(String.format("Cannot check URL %s is reachable due to: %s", url, e.getMessage()), e);
        return false;
    }
}
private static DirectTemplateDownloader getCheckerDownloader(String url, Integer connectTimeout, Integer connectionRequestTimeout, Integer socketTimeout) {
if (url.toLowerCase().startsWith("https:")) {
return new HttpsDirectTemplateDownloader(url);
return new HttpsDirectTemplateDownloader(url, connectTimeout, connectionRequestTimeout, socketTimeout);
} else if (url.toLowerCase().startsWith("http:")) {
return new HttpDirectTemplateDownloader(url);
return new HttpDirectTemplateDownloader(url, connectTimeout, socketTimeout);
} else if (url.toLowerCase().startsWith("nfs:")) {
return new NfsDirectTemplateDownloader(url);
} else if (url.toLowerCase().endsWith(".metalink")) {
return new MetalinkDirectTemplateDownloader(url);
return new MetalinkDirectTemplateDownloader(url, connectTimeout, socketTimeout);
} else {
throw new CloudRuntimeException(String.format("Cannot find a download checker for url: %s", url));
}
}
public static Long getFileSize(String url, String format) {
DirectTemplateDownloader checker = getCheckerDownloader(url);
DirectTemplateDownloader checker = getCheckerDownloader(url, null, null, null);
return checker.getRemoteFileSize(url, format);
}
/**
 * Retrieves the size of the remote file at {@code url}, honouring the
 * supplied HTTP timeouts (each may be null to use the downloader's default).
 *
 * @param url    the URL of the remote file
 * @param format the template/volume format of the file
 * @return the remote file size as reported by the checker downloader
 */
public static Long getFileSize(String url, String format, Integer connectTimeout, Integer connectionRequestTimeout, Integer socketTimeout) {
    final DirectTemplateDownloader downloader = getCheckerDownloader(url, connectTimeout, connectionRequestTimeout, socketTimeout);
    return downloader.getRemoteFileSize(url, format);
}
}

View File

@ -50,8 +50,8 @@ public class HttpDirectTemplateDownloader extends DirectTemplateDownloaderImpl {
protected GetMethod request;
protected Map<String, String> reqHeaders = new HashMap<>();
protected HttpDirectTemplateDownloader(String url) {
this(url, null, null, null, null, null, null, null);
protected HttpDirectTemplateDownloader(String url, Integer connectTimeout, Integer socketTimeout) {
this(url, null, null, null, null, connectTimeout, socketTimeout, null);
}
public HttpDirectTemplateDownloader(String url, Long templateId, String destPoolPath, String checksum,

View File

@ -65,8 +65,8 @@ public class HttpsDirectTemplateDownloader extends DirectTemplateDownloaderImpl
protected CloseableHttpClient httpsClient;
private HttpUriRequest req;
protected HttpsDirectTemplateDownloader(String url) {
this(url, null, null, null, null, null, null, null, null);
protected HttpsDirectTemplateDownloader(String url, Integer connectTimeout, Integer connectionRequestTimeout, Integer socketTimeout) {
this(url, null, null, null, null, connectTimeout, socketTimeout, connectionRequestTimeout, null);
}
public HttpsDirectTemplateDownloader(String url, Long templateId, String destPoolPath, String checksum, Map<String, String> headers,

View File

@ -60,8 +60,8 @@ public class MetalinkDirectTemplateDownloader extends DirectTemplateDownloaderIm
}
}
protected MetalinkDirectTemplateDownloader(String url) {
this(url, null, null, null, null, null, null, null);
protected MetalinkDirectTemplateDownloader(String url, Integer connectTimeout, Integer socketTimeout) {
this(url, null, null, null, null, connectTimeout, socketTimeout, null);
}
public MetalinkDirectTemplateDownloader(String url, String destPoolPath, Long templateId, String checksum,

View File

@ -56,7 +56,7 @@ public class BaseDirectTemplateDownloaderTest {
private HttpEntity httpEntity;
@InjectMocks
protected HttpsDirectTemplateDownloader httpsDownloader = new HttpsDirectTemplateDownloader(httpUrl);
protected HttpsDirectTemplateDownloader httpsDownloader = new HttpsDirectTemplateDownloader(httpUrl, 1000, 1000, 1000);
@Before
public void init() throws IOException {

View File

@ -25,7 +25,8 @@ import org.mockito.InjectMocks;
public class MetalinkDirectTemplateDownloaderTest extends BaseDirectTemplateDownloaderTest {
@InjectMocks
protected MetalinkDirectTemplateDownloader metalinkDownloader = new MetalinkDirectTemplateDownloader(httpsUrl);
protected MetalinkDirectTemplateDownloader metalinkDownloader = new MetalinkDirectTemplateDownloader(httpsUrl, 1000, 1000);
@Test
public void testCheckUrlMetalink() {
metalinkDownloader.downloader = httpsDownloader;

View File

@ -79,7 +79,7 @@ public interface VolumeOrchestrationService {
Long.class,
"storage.max.volume.size",
"2000",
"The maximum size for a volume (in GB).",
"The maximum size for a volume (in GiB).",
true);
VolumeInfo moveVolume(VolumeInfo volume, long destPoolDcId, Long destPoolPodId, Long destPoolClusterId, HypervisorType dataDiskHyperType)

View File

@ -1,37 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.engine.service.api;
import java.net.URI;
import java.util.List;
import com.cloud.utils.component.PluggableService;
public interface DirectoryService {
void registerService(String serviceName, URI endpoint);
void unregisterService(String serviceName, URI endpoint);
List<URI> getEndPoints(String serviceName);
URI getLoadBalancedEndPoint(String serviceName);
List<PluggableService> listServices();
}

View File

@ -1,57 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.engine.service.api;
import java.util.List;
import javax.ws.rs.Path;
import com.cloud.network.Network;
import com.cloud.storage.Volume;
import com.cloud.vm.VirtualMachine;
/**
* Service to retrieve CloudStack entities
* very likely to change
*/
@Path("resources")
public interface EntityService {
List<String> listVirtualMachines();
List<String> listVolumes();
List<String> listNetworks();
List<String> listNics();
List<String> listSnapshots();
List<String> listTemplates();
List<String> listStoragePools();
List<String> listHosts();
VirtualMachine getVirtualMachine(String vm);
Volume getVolume(String volume);
Network getNetwork(String network);
}

View File

@ -1,56 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.engine.service.api;
import java.net.URL;
import java.util.List;
import com.cloud.alert.Alert;
public interface OperationsServices {
// List<AsyncJob> listJobs();
//
// List<AsyncJob> listJobsInProgress();
//
// List<AsyncJob> listJobsCompleted();
//
// List<AsyncJob> listJobsCompleted(Long from);
//
// List<AsyncJob> listJobsInWaiting();
void cancelJob(String job);
List<Alert> listAlerts();
Alert getAlert(String uuid);
void cancelAlert(String alert);
void registerForAlerts();
String registerForEventNotifications(String type, String topic, URL url);
boolean deregisterForEventNotifications(String notificationId);
/**
* @return the list of event topics someone can register for
*/
List<String> listEventTopics();
}

View File

@ -1,47 +0,0 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>cloud-engine-network</artifactId>
<name>Apache CloudStack Cloud Engine API</name>
<parent>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine</artifactId>
<version>4.19.0.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<dependencies>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine-components-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-framework-ipc</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
</project>

View File

@ -1,40 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.network;
public interface NetworkOrchestrator {
/**
* Prepares for a VM to join a network
* @param vm vm
* @param reservationId reservation id
*/
void prepare(String vm, String reservationId);
/**
* Release all reservation
*/
void release(String vm, String reservationId);
/**
* Cancel a previous reservation
* @param reservationId
*/
void cancel(String reservationId);
}

View File

@ -3760,7 +3760,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
if (cmd instanceof PingRoutingCommand) {
final PingRoutingCommand ping = (PingRoutingCommand)cmd;
if (ping.getHostVmStateReport() != null) {
_syncMgr.processHostVmStatePingReport(agentId, ping.getHostVmStateReport());
_syncMgr.processHostVmStatePingReport(agentId, ping.getHostVmStateReport(), ping.getOutOfBand());
}
scanStalledVMInTransitionStateOnUpHost(agentId);

View File

@ -27,7 +27,7 @@ public interface VirtualMachinePowerStateSync {
void processHostVmStateReport(long hostId, Map<String, HostVmStateReportEntry> report);
// to adapt legacy ping report
void processHostVmStatePingReport(long hostId, Map<String, HostVmStateReportEntry> report);
void processHostVmStatePingReport(long hostId, Map<String, HostVmStateReportEntry> report, boolean force);
Map<Long, VirtualMachine.PowerState> convertVmStateReport(Map<String, HostVmStateReportEntry> states);
}

View File

@ -55,19 +55,19 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
s_logger.debug("Process host VM state report. host: " + hostId);
Map<Long, VirtualMachine.PowerState> translatedInfo = convertVmStateReport(report);
processReport(hostId, translatedInfo);
processReport(hostId, translatedInfo, false);
}
@Override
public void processHostVmStatePingReport(long hostId, Map<String, HostVmStateReportEntry> report) {
public void processHostVmStatePingReport(long hostId, Map<String, HostVmStateReportEntry> report, boolean force) {
if (s_logger.isDebugEnabled())
s_logger.debug("Process host VM state report from ping process. host: " + hostId);
Map<Long, VirtualMachine.PowerState> translatedInfo = convertVmStateReport(report);
processReport(hostId, translatedInfo);
processReport(hostId, translatedInfo, force);
}
private void processReport(long hostId, Map<Long, VirtualMachine.PowerState> translatedInfo) {
private void processReport(long hostId, Map<Long, VirtualMachine.PowerState> translatedInfo, boolean force) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Process VM state report. host: " + hostId + ", number of records in report: " + translatedInfo.size());
@ -117,7 +117,7 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
// Make sure powerState is up to date for missing VMs
try {
if (!_instanceDao.isPowerStateUpToDate(instance.getId())) {
if (!force && !_instanceDao.isPowerStateUpToDate(instance.getId())) {
s_logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM id: " + instance.getId());
_instanceDao.resetVmPowerStateTracking(instance.getId());
continue;
@ -150,7 +150,7 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
long milliSecondsSinceLastStateUpdate = currentTime.getTime() - vmStateUpdateTime.getTime();
if (milliSecondsSinceLastStateUpdate > milliSecondsGracefullPeriod) {
if (force || milliSecondsSinceLastStateUpdate > milliSecondsGracefullPeriod) {
s_logger.debug("vm id: " + instance.getId() + " - time since last state update(" + milliSecondsSinceLastStateUpdate + "ms) has passed graceful period");
// this is were a race condition might have happened if we don't re-fetch the instance;

View File

@ -47,7 +47,6 @@
<!-- keep in alphabetic order -->
<module>api</module>
<module>components-api</module>
<module>network</module>
<module>orchestration</module>
<module>schema</module>
<module>service</module>

View File

@ -23,7 +23,6 @@ import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;
import com.cloud.utils.db.Encrypt;
import org.apache.cloudstack.api.InternalIdentity;
@Entity
@ -40,7 +39,6 @@ public class DomainDetailVO implements InternalIdentity {
@Column(name = "name")
private String name;
@Encrypt
@Column(name = "value")
private String value;

View File

@ -33,7 +33,6 @@ import com.cloud.utils.db.GenericSearchBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Func;
import com.cloud.utils.db.TransactionLegacy;
@Component
public class ProjectDaoImpl extends GenericDaoBase<ProjectVO, Long> implements ProjectDao {
@ -71,22 +70,8 @@ public class ProjectDaoImpl extends GenericDaoBase<ProjectVO, Long> implements P
@Override
@DB
public boolean remove(Long projectId) {
boolean result = false;
TransactionLegacy txn = TransactionLegacy.currentTxn();
txn.start();
ProjectVO projectToRemove = findById(projectId);
projectToRemove.setName(null);
if (!update(projectId, projectToRemove)) {
s_logger.warn("Failed to reset name for the project id=" + projectId + " as a part of project remove");
return false;
}
_tagsDao.removeByIdAndType(projectId, ResourceObjectType.Project);
result = super.remove(projectId);
txn.commit();
return result;
return super.remove(projectId);
}
@Override

View File

@ -17,15 +17,19 @@
package com.cloud.upgrade.dao;
import com.cloud.upgrade.SystemVmTemplateRegistration;
import com.cloud.utils.crypt.DBEncryptionUtil;
import com.cloud.utils.DateUtil;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.log4j.Logger;
import org.jasypt.exceptions.EncryptionOperationNotPossibleException;
import java.io.InputStream;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
@ -34,6 +38,10 @@ public class Upgrade41810to41900 implements DbUpgrade, DbUpgradeSystemVmTemplate
final static Logger LOG = Logger.getLogger(Upgrade41810to41900.class);
private SystemVmTemplateRegistration systemVmTemplateRegistration;
private static final String ACCOUNT_DETAILS = "account_details";
private static final String DOMAIN_DETAILS = "domain_details";
private final SimpleDateFormat[] formats = {
new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"), new SimpleDateFormat("MM/dd/yyyy HH:mm:ss"), new SimpleDateFormat("dd/MM/yyyy HH:mm:ss"),
new SimpleDateFormat("EEE MMM dd HH:mm:ss z yyyy")};
@ -66,6 +74,7 @@ public class Upgrade41810to41900 implements DbUpgrade, DbUpgradeSystemVmTemplate
@Override
public void performDataMigration(Connection conn) {
decryptConfigurationValuesFromAccountAndDomainScopesNotInSecureHiddenCategories(conn);
migrateBackupDates(conn);
}
@ -95,6 +104,37 @@ public class Upgrade41810to41900 implements DbUpgrade, DbUpgradeSystemVmTemplate
}
}
/**
 * Decrypts scoped configuration values stored in the account_details and
 * domain_details tables, skipping entries whose configuration category is
 * Hidden or Secure (those must remain encrypted).
 */
protected void decryptConfigurationValuesFromAccountAndDomainScopesNotInSecureHiddenCategories(Connection conn) {
    LOG.info("Decrypting global configuration values from the following tables: account_details and domain_details.");
    // Same decrypt-and-rewrite pass for both detail tables.
    for (String table : new String[] {ACCOUNT_DETAILS, DOMAIN_DETAILS}) {
        Map<Long, String> configsToDecrypt = getConfigsWithScope(conn, table);
        updateConfigValuesWithScope(conn, configsToDecrypt, table);
        LOG.info(String.format("Successfully decrypted configurations from %s table.", table));
    }
}
/**
 * Loads the id/value pairs from the given detail table (account_details or
 * domain_details) whose matching configuration is NOT in the Hidden or Secure
 * categories and whose value is non-empty. These are the rows eligible for
 * decryption by updateConfigValuesWithScope.
 * NOTE(review): the SQL compares against "" with double quotes — assumes the
 * MySQL default (non-ANSI_QUOTES) mode where that is a string literal; confirm.
 *
 * @param conn  open database connection
 * @param table detail table name, interpolated into the query (callers pass
 *              only the fixed ACCOUNT_DETAILS/DOMAIN_DETAILS constants)
 * @return map of detail row id to its (possibly encrypted) value
 * @throws CloudRuntimeException when the query fails
 */
protected Map<Long, String> getConfigsWithScope(Connection conn, String table) {
Map<Long, String> configsToBeUpdated = new HashMap<>();
String selectDetails = String.format("SELECT details.id, details.value from cloud.%s details, cloud.configuration c " +
"WHERE details.name = c.name AND c.category NOT IN ('Hidden', 'Secure') AND details.value <> \"\" ORDER BY details.id;", table);
try (PreparedStatement pstmt = conn.prepareStatement(selectDetails)) {
try (ResultSet result = pstmt.executeQuery()) {
while (result.next()) {
configsToBeUpdated.put(result.getLong("id"), result.getString("value"));
}
}
return configsToBeUpdated;
} catch (SQLException e) {
String message = String.format("Unable to retrieve data from table [%s] due to [%s].", table, e.getMessage());
LOG.error(message, e);
throw new CloudRuntimeException(message, e);
}
}
public void migrateBackupDates(Connection conn) {
LOG.info("Trying to convert backups' date column from varchar(255) to datetime type.");
@ -125,6 +165,27 @@ public class Upgrade41810to41900 implements DbUpgrade, DbUpgradeSystemVmTemplate
}
}
/**
 * Rewrites each given detail row with its decrypted value.
 *
 * @param conn                open database connection
 * @param configsToBeUpdated  map of detail row id to encrypted value, as returned by getConfigsWithScope
 * @param table               detail table name (ACCOUNT_DETAILS or DOMAIN_DETAILS)
 * @throws CloudRuntimeException when decryption or the update fails; processing
 *         stops at the first failing row, matching the previous behavior
 */
protected void updateConfigValuesWithScope(Connection conn, Map<Long, String> configsToBeUpdated, String table) {
    String updateConfigValues = String.format("UPDATE cloud.%s SET value = ? WHERE id = ?;", table);
    // Prepare the statement once and reuse it for every row instead of
    // re-preparing on each loop iteration.
    try (PreparedStatement pstmt = conn.prepareStatement(updateConfigValues)) {
        for (Map.Entry<Long, String> config : configsToBeUpdated.entrySet()) {
            try {
                String decryptedValue = DBEncryptionUtil.decrypt(config.getValue());
                pstmt.setString(1, decryptedValue);
                pstmt.setLong(2, config.getKey());
                LOG.info(String.format("Updating config with ID [%s] to value [%s].", config.getKey(), decryptedValue));
                pstmt.executeUpdate();
            } catch (SQLException | EncryptionOperationNotPossibleException e) {
                String message = String.format("Unable to update config value with ID [%s] on table [%s] due to [%s]. The config value may already be decrypted.",
                        config.getKey(), table, e);
                // Pass the exception so the stack trace reaches the log, not just the message.
                LOG.error(message, e);
                throw new CloudRuntimeException(message, e);
            }
        }
    } catch (SQLException e) {
        String message = String.format("Unable to prepare update statement for table [%s] due to [%s].", table, e.getMessage());
        LOG.error(message, e);
        throw new CloudRuntimeException(message, e);
    }
}
private void fetchDatesAndMigrateToNewColumn(Connection conn) {
String selectBackupDates = "SELECT `id`, `old_date` FROM `cloud`.`backups` WHERE 1;";
String date;

View File

@ -25,8 +25,6 @@ import javax.persistence.Table;
import org.apache.cloudstack.api.InternalIdentity;
import com.cloud.utils.db.Encrypt;
@Entity
@Table(name = "account_details")
public class AccountDetailVO implements InternalIdentity {
@ -41,7 +39,6 @@ public class AccountDetailVO implements InternalIdentity {
@Column(name = "name")
private String name;
@Encrypt
@Column(name = "value", length=4096)
private String value;

View File

@ -51,7 +51,7 @@ INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'manag
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'secstorage.capacity.standby', '10', 'The minimal number of command execution sessions that system is able to serve immediately(standby capacity)');
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'secstorage.cmd.execution.time.max', '30', 'The max command execution time in minute');
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'secstorage.session.max', '50', 'The max number of command execution sessions that a SSVM can handle');
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Storage', 'DEFAULT', 'management-server', 'storage.max.volume.size', '2000', 'The maximum size for a volume (in GB).');
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Storage', 'DEFAULT', 'management-server', 'storage.max.volume.size', '2000', 'The maximum size for a volume (in GiB).');
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'task.cleanup.retry.interval', '600', 'Time (in seconds) to wait before retrying cleanup of tasks if the cleanup failed previously. 0 means to never retry.');
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'vmware.additional.vnc.portrange.start', '50000', 'Start port number of additional VNC port range');
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'vmware.percluster.host.max', '8', 'maxmium hosts per vCenter cluster(do not let it grow over 8)');

View File

@ -282,3 +282,6 @@ FROM
`cloud`.`network_offering_details` AS `offering_details` ON `offering_details`.`network_offering_id` = `network_offerings`.`id` AND `offering_details`.`name`='internetProtocol'
GROUP BY
`network_offerings`.`id`;
-- Set removed state for all removed accounts
UPDATE `cloud`.`account` SET state='removed' WHERE `removed` IS NOT NULL;

View File

@ -54,11 +54,6 @@
<artifactId>cloud-engine-storage</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine-network</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-context</artifactId>

View File

@ -263,13 +263,9 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
}
protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, DeploymentPlan plan) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Checking if storage pool is suitable, name: " + pool.getName() + " ,poolId: " + pool.getId());
}
s_logger.debug(String.format("Checking if storage pool [%s] is suitable to disk [%s].", pool, dskCh));
if (avoid.shouldAvoid(pool)) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("StoragePool is in avoid set, skipping this pool");
}
s_logger.debug(String.format("StoragePool [%s] is in avoid set, skipping this pool to allocation of disk [%s].", pool, dskCh));
return false;
}
@ -297,6 +293,8 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
}
if (!checkDiskProvisioningSupport(dskCh, pool)) {
s_logger.debug(String.format("Storage pool [%s] does not have support to disk provisioning of disk [%s].", pool, ReflectionToStringBuilderUtils.reflectOnlySelectedFields(dskCh,
"type", "name", "diskOfferingId", "templateId", "volumeId", "provisioningType", "hyperType")));
return false;
}
@ -306,10 +304,12 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
Volume volume = volumeDao.findById(dskCh.getVolumeId());
if(!storageMgr.storagePoolCompatibleWithVolumePool(pool, volume)) {
s_logger.debug(String.format("Pool [%s] is not compatible with volume [%s], skipping it.", pool, volume));
return false;
}
if (pool.isManaged() && !storageUtil.managedStoragePoolCanScale(pool, plan.getClusterId(), plan.getHostId())) {
s_logger.debug(String.format("Cannot allocate pool [%s] to volume [%s] because the max number of managed clustered filesystems has been exceeded.", pool, volume));
return false;
}
@ -317,14 +317,14 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
List<Pair<Volume, DiskProfile>> requestVolumeDiskProfilePairs = new ArrayList<>();
requestVolumeDiskProfilePairs.add(new Pair<>(volume, dskCh));
if (dskCh.getHypervisorType() == HypervisorType.VMware) {
// Skip the parent datastore cluster, consider only child storage pools in it
if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster && storageMgr.isStoragePoolDatastoreClusterParent(pool)) {
s_logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is a parent datastore cluster.", pool, volume));
return false;
}
// Skip the storage pool whose parent datastore cluster is not in UP state.
if (pool.getParent() != 0L) {
StoragePoolVO datastoreCluster = storagePoolDao.findById(pool.getParent());
if (datastoreCluster == null || (datastoreCluster != null && datastoreCluster.getStatus() != StoragePoolStatus.Up)) {
s_logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is not in [%s] state.", datastoreCluster, volume, StoragePoolStatus.Up));
return false;
}
}
@ -332,6 +332,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
try {
boolean isStoragePoolStoragepolicyComplaince = storageMgr.isStoragePoolCompliantWithStoragePolicy(requestVolumeDiskProfilePairs, pool);
if (!isStoragePoolStoragepolicyComplaince) {
s_logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is not compliant with the storage policy required by the volume.", pool, volume));
return false;
}
} catch (StorageUnavailableException e) {

View File

@ -100,9 +100,10 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
}
StoragePool storagePool = (StoragePool)dataStoreMgr.getPrimaryDataStore(pool.getId());
if (filter(avoid, storagePool, dskCh, plan)) {
s_logger.trace(String.format("Found suitable local storage pool [%s], adding to list.", pool));
s_logger.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", pool, dskCh));
suitablePools.add(storagePool);
} else {
s_logger.debug(String.format("Adding storage pool [%s] to avoid set during allocation of disk [%s].", pool, dskCh));
avoid.addPool(pool.getId());
}
}

View File

@ -82,9 +82,10 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
if (pool != null && pool.isLocal()) {
StoragePool storagePool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
if (filter(avoid, storagePool, dskCh, plan)) {
s_logger.trace(String.format("Found suitable local storage pool [%s], adding to list.", pool));
s_logger.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", pool, dskCh));
suitablePools.add(storagePool);
} else {
s_logger.debug(String.format("Adding storage pool [%s] to avoid set during allocation of disk [%s].", pool, dskCh));
avoid.addPool(pool.getId());
}
}
@ -107,8 +108,10 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
}
StoragePool storagePool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
if (filter(avoid, storagePool, dskCh, plan)) {
s_logger.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", pool, dskCh));
suitablePools.add(storagePool);
} else {
s_logger.debug(String.format("Adding storage pool [%s] to avoid set during allocation of disk [%s].", pool, dskCh));
avoid.addPool(pool.getId());
}
}

View File

@ -94,10 +94,11 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
}
StoragePool storagePool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(storage.getId());
if (filter(avoid, storagePool, dskCh, plan)) {
LOGGER.trace(String.format("Found suitable local storage pool [%s], adding to list.", storage));
LOGGER.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", storagePool, dskCh));
suitablePools.add(storagePool);
} else {
if (canAddStoragePoolToAvoidSet(storage)) {
LOGGER.debug(String.format("Adding storage pool [%s] to avoid set during allocation of disk [%s].", storagePool, dskCh));
avoid.addPool(storagePool.getId());
}
}

View File

@ -46,8 +46,7 @@
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine-components-api</artifactId>
<version>4.19.0.0-SNAPSHOT</version>
<scope>compile</scope>
<version>${project.version}</version>
</dependency>
</dependencies>
</project>

View File

@ -170,7 +170,7 @@ public class ConfigurationVO implements Configuration {
@Override
public boolean isEncrypted() {
return "Hidden".equals(getCategory()) || "Secure".equals(getCategory());
return StringUtils.equalsAny(getCategory(), "Hidden", "Secure");
}
@Override

View File

@ -475,6 +475,8 @@ public class EncryptionSecretKeyChanger {
// migrate resource details values
migrateHostDetails(conn);
migrateEncryptedAccountDetails(conn);
migrateEncryptedDomainDetails(conn);
migrateClusterDetails(conn);
migrateImageStoreDetails(conn);
migrateStoragePoolDetails(conn);
@ -497,6 +499,30 @@ public class EncryptionSecretKeyChanger {
return true;
}
/**
 * Migrates the encrypted account_details values (rows whose configuration category is
 * 'Hidden' or 'Secure') via migrateValueAndUpdateDatabaseById.
 */
private void migrateEncryptedAccountDetails(Connection conn) {
    System.out.println("Beginning migration of account_details encrypted values");
    final String table = "account_details";
    final String update = "UPDATE cloud.account_details SET value = ? WHERE id = ?;";
    final String select = "SELECT details.id, details.value from account_details details, cloud.configuration c " +
            "WHERE details.name = c.name AND c.category IN ('Hidden', 'Secure') AND details.value <> \"\" ORDER BY details.id;";
    migrateValueAndUpdateDatabaseById(conn, table, select, update, false);
    System.out.println("End migration of account details values");
}
/**
 * Migrates the encrypted domain_details values (rows whose configuration category is
 * 'Hidden' or 'Secure') via migrateValueAndUpdateDatabaseById.
 */
private void migrateEncryptedDomainDetails(Connection conn) {
    System.out.println("Beginning migration of domain_details encrypted values");
    final String table = "domain_details";
    final String update = "UPDATE cloud.domain_details SET value = ? WHERE id = ?;";
    final String select = "SELECT details.id, details.value from domain_details details, cloud.configuration c " +
            "WHERE details.name = c.name AND c.category IN ('Hidden', 'Secure') AND details.value <> \"\" ORDER BY details.id;";
    migrateValueAndUpdateDatabaseById(conn, table, select, update, false);
    System.out.println("End migration of domain details values");
}
protected String migrateValue(String value) {
if (StringUtils.isEmpty(value)) {
return value;

View File

@ -83,6 +83,7 @@ import org.libvirt.DomainInfo;
import org.libvirt.DomainInfo.DomainState;
import org.libvirt.DomainInterfaceStats;
import org.libvirt.DomainSnapshot;
import org.libvirt.Library;
import org.libvirt.LibvirtException;
import org.libvirt.MemoryStatistic;
import org.libvirt.Network;
@ -90,6 +91,9 @@ import org.libvirt.SchedParameter;
import org.libvirt.SchedUlongParameter;
import org.libvirt.Secret;
import org.libvirt.VcpuInfo;
import org.libvirt.event.DomainEvent;
import org.libvirt.event.DomainEventDetail;
import org.libvirt.event.StoppedDetail;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
@ -97,6 +101,7 @@ import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.Command;
import com.cloud.agent.api.HostVmStateReportEntry;
@ -175,6 +180,8 @@ import com.cloud.network.Networks.BroadcastDomainType;
import com.cloud.network.Networks.IsolationType;
import com.cloud.network.Networks.RouterPrivateIpStrategy;
import com.cloud.network.Networks.TrafficType;
import com.cloud.resource.AgentStatusUpdater;
import com.cloud.resource.ResourceStatusUpdater;
import com.cloud.resource.RequestWrapper;
import com.cloud.resource.ServerResource;
import com.cloud.resource.ServerResourceBase;
@ -224,11 +231,12 @@ import com.google.gson.Gson;
* private mac addresses for domrs | mac address | start + 126 || ||
* pool | the parent of the storage pool hierarchy * }
**/
public class LibvirtComputingResource extends ServerResourceBase implements ServerResource, VirtualRouterDeployer {
public class LibvirtComputingResource extends ServerResourceBase implements ServerResource, VirtualRouterDeployer, ResourceStatusUpdater {
protected static Logger s_logger = Logger.getLogger(LibvirtComputingResource.class);
private static final String CONFIG_VALUES_SEPARATOR = ",";
private static final String LEGACY = "legacy";
private static final String SECURE = "secure";
@ -457,6 +465,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
protected CPUStat cpuStat = new CPUStat();
protected MemStat memStat = new MemStat(dom0MinMem, dom0OvercommitMem);
private final LibvirtUtilitiesHelper libvirtUtilitiesHelper = new LibvirtUtilitiesHelper();
private AgentStatusUpdater _agentStatusUpdater;
protected Boolean enableManuallySettingCpuTopologyOnKvmVm = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.ENABLE_MANUALLY_SETTING_CPU_TOPOLOGY_ON_KVM_VM);
@ -481,6 +490,11 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
return hypervisorQemuVersion;
}
@Override
public void registerStatusUpdater(AgentStatusUpdater updater) {
_agentStatusUpdater = updater;
}
@Override
public ExecutionResult executeInVR(final String routerIp, final String script, final String args) {
return executeInVR(routerIp, script, args, timeout);
@ -3590,9 +3604,63 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
} catch (final CloudRuntimeException e) {
s_logger.debug("Unable to initialize local storage pool: " + e);
}
setupLibvirtEventListener();
return sscmd;
}
/**
 * Registers a libvirt domain lifecycle listener so guest state changes (e.g. a VM
 * shutting itself down) can trigger out-of-band status updates via
 * {@link #onDomainLifecycleChange}.
 *
 * A dedicated daemon thread runs the libvirt event loop; callbacks registered on the
 * connection are dispatched from that loop. Failure to register is logged but not
 * fatal — the agent then falls back to regular pings only.
 */
private void setupLibvirtEventListener() {
    // Event loop thread: libvirt callbacks are only delivered while this loop runs.
    final Thread libvirtListenerThread = new Thread(() -> {
        try {
            Library.runEventLoop();
        } catch (LibvirtException e) {
            s_logger.error("LibvirtException was thrown in event loop: ", e);
        } catch (InterruptedException e) {
            s_logger.error("Libvirt event loop was interrupted: ", e);
        }
    });
    try {
        // Daemon so this thread never blocks agent shutdown.
        libvirtListenerThread.setDaemon(true);
        libvirtListenerThread.start();
        Connect conn = LibvirtConnection.getConnection();
        // NOTE(review): assumes event-loop registration was performed before the connection
        // was opened (libvirt requires virEventRegisterDefaultImpl before connecting for
        // events to be delivered) — presumably handled inside LibvirtConnection; confirm.
        conn.addLifecycleListener(this::onDomainLifecycleChange);
        s_logger.debug("Set up the libvirt domain event lifecycle listener");
    } catch (LibvirtException e) {
        s_logger.error("Failed to get libvirt connection for domain event lifecycle", e);
    }
}
/**
 * Libvirt domain lifecycle callback. When a guest shuts itself down or crashes, an
 * out-of-band agent status update is triggered so the management server learns about
 * the state change before the next scheduled ping.
 *
 * @param domain      the libvirt domain the event refers to
 * @param domainEvent the lifecycle event (may be null)
 * @return 0 always; exceptions are contained here so the libvirt event loop keeps running
 */
private int onDomainLifecycleChange(Domain domain, DomainEvent domainEvent) {
    try {
        s_logger.debug(String.format("Got event lifecycle change on Domain %s, event %s", domain.getName(), domainEvent));
        if (domainEvent != null) {
            switch (domainEvent.getType()) {
                case STOPPED:
                    /* libvirt-destroyed VMs have detail StoppedDetail.DESTROYED, self shutdown guests are StoppedDetail.SHUTDOWN
                     * Checking for this helps us differentiate between events where cloudstack or admin stopped the VM vs guest
                     * initiated, and avoid pushing extra updates for actions we are initiating without a need for extra tracking */
                    DomainEventDetail detail = domainEvent.getDetail();
                    if (StoppedDetail.SHUTDOWN.equals(detail) || StoppedDetail.CRASHED.equals(detail)) {
                        if (_agentStatusUpdater != null) {
                            s_logger.info("Triggering out of band status update due to completed self-shutdown or crash of VM");
                            _agentStatusUpdater.triggerUpdate();
                        } else {
                            // The listener is installed during resource initialization, while the
                            // updater is only registered once the agent link is up — an early event
                            // must not NPE into the catch-all below.
                            s_logger.debug("No agent status updater registered yet, skipping out of band update");
                        }
                    } else {
                        s_logger.debug("Event detail: " + detail);
                    }
                    break;
                default:
                    s_logger.debug(String.format("No handling for event %s", domainEvent));
            }
        }
    } catch (LibvirtException e) {
        s_logger.error("Libvirt exception while processing lifecycle event", e);
    } catch (Throwable e) {
        s_logger.error("Error during lifecycle", e);
    }
    return 0;
}
public String diskUuidToSerial(String uuid) {
String uuidWithoutHyphen = uuid.replace("-","");
return uuidWithoutHyphen.substring(0, Math.min(uuidWithoutHyphen.length(), 20));

View File

@ -35,11 +35,16 @@ public class LibvirtCheckUrlCommand extends CommandWrapper<CheckUrlCommand, Chec
@Override
public CheckUrlAnswer execute(CheckUrlCommand cmd, LibvirtComputingResource serverResource) {
final String url = cmd.getUrl();
s_logger.info("Checking URL: " + url);
final Integer connectTimeout = cmd.getConnectTimeout();
final Integer connectionRequestTimeout = cmd.getConnectionRequestTimeout();
final Integer socketTimeout = cmd.getSocketTimeout();
s_logger.info(String.format("Checking URL: %s, with connect timeout: %d, connect request timeout: %d, socket timeout: %d", url, connectTimeout, connectionRequestTimeout, socketTimeout));
Long remoteSize = null;
boolean checkResult = DirectDownloadHelper.checkUrlExistence(url);
boolean checkResult = DirectDownloadHelper.checkUrlExistence(url, connectTimeout, connectionRequestTimeout, socketTimeout);
if (checkResult) {
remoteSize = DirectDownloadHelper.getFileSize(url, cmd.getFormat());
remoteSize = DirectDownloadHelper.getFileSize(url, cmd.getFormat(), connectTimeout, connectionRequestTimeout, socketTimeout);
if (remoteSize == null || remoteSize < 0) {
s_logger.error(String.format("Couldn't properly retrieve the remote size of the template on " +
"url %s, obtained size = %s", url, remoteSize));

View File

@ -30,7 +30,7 @@ import org.apache.log4j.Logger;
import com.cloud.user.Account;
@APICommand(name = ReadyForShutdownCmd.APINAME,
description = "Returs the status of CloudStack, whether a shutdown has been triggered and if ready to shutdown",
description = "Returns the status of CloudStack, whether a shutdown has been triggered and if ready to shutdown",
since = "4.19.0",
responseObject = ReadyForShutdownResponse.class,
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)

View File

@ -428,6 +428,19 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
}
}
/**
 * Resizes the Linstor volume definition (volume number 0) of the given resource to the
 * requested size in bytes.
 *
 * @throws ApiException          if the Linstor API call itself fails
 * @throws CloudRuntimeException if Linstor reports an error result for the resize
 */
private void resizeResource(DevelopersApi api, String resourceName, long sizeByte) throws ApiException {
    final long sizeKib = sizeByte / 1024;
    VolumeDefinitionModify modify = new VolumeDefinitionModify();
    modify.setSizeKib(sizeKib);
    ApiCallRcList result = api.volumeDefinitionModify(resourceName, 0, modify);
    if (!result.hasError()) {
        s_logger.info(String.format("Successfully resized %s to %d kib", resourceName, modify.getSizeKib()));
        return;
    }
    String errorMessage = result.get(0).getMessage();
    s_logger.error("Resize error: " + errorMessage);
    throw new CloudRuntimeException(errorMessage);
}
private String cloneResource(long csCloneId, VolumeInfo volumeInfo, StoragePoolVO storagePoolVO) {
// get the cached template on this storage
VMTemplateStoragePoolVO tmplPoolRef = _vmTemplatePoolDao.findByPoolTemplate(
@ -452,6 +465,11 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
}
s_logger.info("Clone resource definition " + cloneRes + " to " + rscName + " finished");
if (volumeInfo.getSize() != null && volumeInfo.getSize() > 0) {
resizeResource(linstorApi, rscName, volumeInfo.getSize());
}
applyAuxProps(linstorApi, rscName, volumeInfo.getName(), volumeInfo.getAttachedVmName());
applyQoSSettings(storagePoolVO, linstorApi, rscName, volumeInfo.getMaxIops());
@ -738,26 +756,16 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
dfm.setSizeKib(resizeParameter.newSize / 1024);
try
{
resizeResource(api, rscName, resizeParameter.newSize);
applyQoSSettings(pool, api, rscName, resizeParameter.newMaxIops);
{
final VolumeVO volume = _volumeDao.findById(vol.getId());
volume.setMinIops(resizeParameter.newMinIops);
volume.setMaxIops(resizeParameter.newMaxIops);
volume.setSize(resizeParameter.newSize);
_volumeDao.update(volume.getId(), volume);
}
ApiCallRcList answers = api.volumeDefinitionModify(rscName, 0, dfm);
if (answers.hasError())
{
s_logger.error("Resize error: " + answers.get(0).getMessage());
errMsg = answers.get(0).getMessage();
} else
{
s_logger.info(String.format("Successfully resized %s to %d kib", rscName, dfm.getSizeKib()));
vol.setSize(resizeParameter.newSize);
vol.update();
}
} catch (ApiException apiExc)
{
s_logger.error(apiExc);
@ -765,12 +773,10 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
}
CreateCmdResult result;
if (errMsg != null)
{
if (errMsg != null) {
result = new CreateCmdResult(null, new Answer(null, false, errMsg));
result.setResult(errMsg);
} else
{
} else {
// notify guests
result = notifyResize(vol, oldSize, resizeParameter);
}

View File

@ -1639,6 +1639,12 @@ public class ApiResponseHelper implements ResponseGenerator {
vmResponse.setCreated(vm.getCreated());
vmResponse.setHypervisor(vm.getHypervisorType().getHypervisorDisplayName());
ServiceOffering serviceOffering = ApiDBUtils.findServiceOfferingById(vm.getServiceOfferingId());
if (serviceOffering != null) {
vmResponse.setServiceOfferingId(serviceOffering.getUuid());
vmResponse.setServiceOfferingName(serviceOffering.getName());
}
if (vm.getHostId() != null) {
Host host = ApiDBUtils.findHostById(vm.getHostId());
if (host != null) {

View File

@ -3786,6 +3786,9 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
}
} else if (templateFilter == TemplateFilter.sharedexecutable || templateFilter == TemplateFilter.shared) {
// only show templates shared by others
if (permittedAccounts.isEmpty()) {
return new Pair<>(new ArrayList<>(), 0);
}
sc.addAnd("sharedAccountId", SearchCriteria.Op.IN, permittedAccountIds.toArray());
} else if (templateFilter == TemplateFilter.executable) {
SearchCriteria<TemplateJoinVO> scc = _templateJoinDao.createSearchCriteria();

View File

@ -49,6 +49,7 @@ import javax.naming.ConfigurationException;
import com.cloud.hypervisor.HypervisorGuru;
import com.cloud.network.dao.NsxProviderDao;
import com.cloud.network.element.NsxProviderVO;
import com.cloud.utils.crypt.DBEncryptionUtil;
import org.apache.cloudstack.acl.SecurityChecker;
import org.apache.cloudstack.affinity.AffinityGroup;
import org.apache.cloudstack.affinity.AffinityGroupService;
@ -668,7 +669,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
@Override
@DB
public String updateConfiguration(final long userId, final String name, final String category, final String value, final String scope, final Long resourceId) {
public String updateConfiguration(final long userId, final String name, final String category, String value, final String scope, final Long resourceId) {
final String validationMsg = validateConfigurationValue(name, value, scope);
if (validationMsg != null) {
@ -681,6 +682,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
// if scope is mentioned as global or not mentioned then it is normal
// global parameter updation
if (scope != null && !scope.isEmpty() && !ConfigKey.Scope.Global.toString().equalsIgnoreCase(scope)) {
boolean valueEncrypted = shouldEncryptValue(category);
if (valueEncrypted) {
value = DBEncryptionUtil.encrypt(value);
}
switch (ConfigKey.Scope.valueOf(scope)) {
case Zone:
final DataCenterVO zone = _zoneDao.findById(resourceId);
@ -771,7 +777,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
default:
throw new InvalidParameterValueException("Scope provided is invalid");
}
return value;
return valueEncrypted ? DBEncryptionUtil.decrypt(value) : value;
}
// Execute all updates in a single transaction
@ -868,6 +875,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
return _configDao.getValue(name);
}
/**
 * Returns true when a configuration category stores its value encrypted in the
 * database ('Hidden' or 'Secure'); false for any other category, including null.
 */
private boolean shouldEncryptValue(String category) {
    return "Hidden".equals(category) || "Secure".equals(category);
}
/**
* Updates the 'hypervisor.list' value to match the new custom hypervisor name set as newValue if the previous value was set
*/
@ -894,10 +905,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
final Long imageStoreId = cmd.getImageStoreId();
Long accountId = cmd.getAccountId();
Long domainId = cmd.getDomainId();
CallContext.current().setEventDetails(" Name: " + name + " New Value: " + (name.toLowerCase().contains("password") ? "*****" : value == null ? "" : value));
// check if config value exists
final ConfigurationVO config = _configDao.findByName(name);
String catergory = null;
String category = null;
String eventValue = encryptEventValueIfConfigIsEncrypted(config, value);
CallContext.current().setEventDetails(String.format(" Name: %s New Value: %s", name, eventValue));
final Account caller = CallContext.current().getCallingAccount();
if (_accountMgr.isDomainAdmin(caller.getId())) {
@ -916,9 +928,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
s_logger.warn("Probably the component manager where configuration variable " + name + " is defined needs to implement Configurable interface");
throw new InvalidParameterValueException("Config parameter with name " + name + " doesn't exist");
}
catergory = _configDepot.get(name).category();
category = _configDepot.get(name).category();
} else {
catergory = config.getCategory();
category = config.getCategory();
}
validateIpAddressRelatedConfigValues(name, value);
@ -975,7 +987,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
value = (id == null) ? null : "";
}
final String updatedValue = updateConfiguration(userId, name, catergory, value, scope, id);
final String updatedValue = updateConfiguration(userId, name, category, value, scope, id);
if (value == null && updatedValue == null || updatedValue.equalsIgnoreCase(value)) {
return _configDao.findByName(name);
} else {
@ -983,6 +995,13 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
}
}
/**
 * Returns the value to record in the event details for a configuration update:
 * masked when the config is stored encrypted, otherwise the value itself (an empty
 * string when the value is null).
 */
private String encryptEventValueIfConfigIsEncrypted(ConfigurationVO config, String value) {
    boolean mustMask = config != null && config.isEncrypted();
    if (mustMask) {
        return "*****";
    }
    return value == null ? "" : value;
}
private ParamCountPair getParamCount(Map<String, Long> scopeMap) {
Long id = null;
int paramCount = 0;

View File

@ -29,4 +29,9 @@ public class CustomServerDiscoverer extends LibvirtServerDiscoverer {
// Location (relative to the CloudStack scripts root) of the KVM hypervisor scripts
// this discoverer patches onto hosts.
protected String getPatchPath() {
    return "scripts/vm/hypervisor/kvm/";
}
@Override
public void processHostAdded(long hostId) {
    // Not using super class implementation here.
    // Intentionally a no-op: the inherited implementation syncs direct-download
    // certificates to the newly added host, which is skipped for this discoverer —
    // presumably because it does not apply to the custom hypervisor type; confirm.
}
}

View File

@ -110,7 +110,7 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements
@Override
public void processHostAdded(long hostId) {
HostVO host = hostDao.findById(hostId);
if (host != null) {
if (host != null && getHypervisorType().equals(host.getHypervisorType())) {
directDownloadManager.syncCertificatesToHost(hostId, host.getDataCenterId());
}
}

View File

@ -1001,7 +1001,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio
final Map<Network.Service, Network.Provider> defaultNSXNetworkOfferingProviders = new HashMap<>();
defaultNSXNetworkOfferingProviders.put(Service.Dhcp, Provider.VPCVirtualRouter);
defaultNSXNetworkOfferingProviders.put(Service.Dns, Provider.VPCVirtualRouter);
defaultNSXNetworkOfferingProviders.put(Service.Dns, Provider.VPCVirtualRouter );
defaultNSXNetworkOfferingProviders.put(Service.SourceNat, Provider.Nsx);
defaultNSXNetworkOfferingProviders.put(Service.UserData, Provider.VPCVirtualRouter);

View File

@ -2388,6 +2388,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
@Override
public boolean storagePoolHasEnoughIops(List<Pair<Volume, DiskProfile>> requestedVolumes, StoragePool pool) {
if (requestedVolumes == null || requestedVolumes.isEmpty() || pool == null) {
s_logger.debug(String.format("Cannot check if storage [%s] has enough IOPS to allocate volumes [%s].", pool, requestedVolumes));
return false;
}
@ -2418,8 +2419,10 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
}
long futureIops = currentIops + requestedIops;
return futureIops <= pool.getCapacityIops();
boolean hasEnoughIops = futureIops <= pool.getCapacityIops();
String hasCapacity = hasEnoughIops ? "has" : "does not have";
s_logger.debug(String.format("Pool [%s] %s enough IOPS to allocate volumes [%s].", pool, hasCapacity, requestedVolumes));
return hasEnoughIops;
}
@Override
@ -2430,10 +2433,12 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
@Override
public boolean storagePoolHasEnoughSpace(List<Pair<Volume, DiskProfile>> volumeDiskProfilesList, StoragePool pool, Long clusterId) {
if (CollectionUtils.isEmpty(volumeDiskProfilesList)) {
s_logger.debug(String.format("Cannot check if pool [%s] has enough space to allocate volumes because the volumes list is empty.", pool));
return false;
}
if (!checkUsagedSpace(pool)) {
s_logger.debug(String.format("Cannot allocate pool [%s] because there is not enough space in this pool.", pool));
return false;
}
@ -2696,30 +2701,34 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
@Override
public boolean storagePoolCompatibleWithVolumePool(StoragePool pool, Volume volume) {
if (pool == null || volume == null) {
s_logger.debug(String.format("Cannot check if storage pool [%s] is compatible with volume [%s].", pool, volume));
return false;
}
if (volume.getPoolId() == null) {
// Volume is not allocated to any pool. Not possible to check compatibility with other pool, let it try
s_logger.debug(String.format("Volume [%s] is not allocated to any pool. Cannot check compatibility with pool [%s].", volume, pool));
return true;
}
StoragePool volumePool = _storagePoolDao.findById(volume.getPoolId());
if (volumePool == null) {
// Volume pool doesn't exist. Not possible to check compatibility with other pool, let it try
s_logger.debug(String.format("Pool [%s] used by volume [%s] does not exist. Cannot check compatibility.", pool, volume));
return true;
}
if (volume.getState() == Volume.State.Ready) {
if (volumePool.getPoolType() == Storage.StoragePoolType.PowerFlex && pool.getPoolType() != Storage.StoragePoolType.PowerFlex) {
s_logger.debug(String.format("Pool [%s] with type [%s] does not match volume [%s] pool type [%s].", pool, pool.getPoolType(), volume, volumePool.getPoolType()));
return false;
} else if (volumePool.getPoolType() != Storage.StoragePoolType.PowerFlex && pool.getPoolType() == Storage.StoragePoolType.PowerFlex) {
s_logger.debug(String.format("Pool [%s] with type [%s] does not match volume [%s] pool type [%s].", pool, pool.getPoolType(), volume, volumePool.getPoolType()));
return false;
}
} else {
s_logger.debug(String.format("Cannot check compatibility of pool [%s] because volume [%s] is not in [%s] state.", pool, volume, Volume.State.Ready));
return false;
}
s_logger.debug(String.format("Pool [%s] is compatible with volume [%s].", pool, volume));
return true;
}

View File

@ -39,6 +39,7 @@ import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd;
import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd;
import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd;
import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd;
import org.apache.cloudstack.direct.download.DirectDownloadManager;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
@ -168,7 +169,10 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase {
if (host == null) {
throw new CloudRuntimeException("Couldn't find a host to validate URL " + url);
}
CheckUrlCommand cmd = new CheckUrlCommand(format, url);
Integer socketTimeout = DirectDownloadManager.DirectDownloadSocketTimeout.value();
Integer connectRequestTimeout = DirectDownloadManager.DirectDownloadConnectionRequestTimeout.value();
Integer connectTimeout = DirectDownloadManager.DirectDownloadConnectTimeout.value();
CheckUrlCommand cmd = new CheckUrlCommand(format, url, connectTimeout, connectRequestTimeout, socketTimeout);
s_logger.debug("Performing URL " + url + " validation on host " + host.getId());
Answer answer = _agentMgr.easySend(host.getId(), cmd);
if (answer == null || !answer.getResult()) {

View File

@ -814,6 +814,9 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M
return false;
}
account.setState(State.REMOVED);
_accountDao.update(accountId, account);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Removed account " + accountId);
}

View File

@ -25,7 +25,8 @@ public interface PasswordPolicy {
Integer.class,
"password.policy.minimum.special.characters",
"0",
"Minimum number of special characters that the user's password must have. The value 0 means the user's password does not require any special characters.",
"Minimum number of special characters that the user's password must have. Any character that is neither a letter nor numeric is considered special. " +
"The value 0 means the user's password does not require any special characters.",
true,
ConfigKey.Scope.Domain);
@ -43,7 +44,7 @@ public interface PasswordPolicy {
Integer.class,
"password.policy.minimum.uppercase.letters",
"0",
"Minimum number of uppercase letters that the user's password must have. The value 0 means the user's password does not require any uppercase letters.",
"Minimum number of uppercase letters [A-Z] that the user's password must have. The value 0 means the user's password does not require any uppercase letters.",
true,
ConfigKey.Scope.Domain);
@ -52,7 +53,7 @@ public interface PasswordPolicy {
Integer.class,
"password.policy.minimum.lowercase.letters",
"0",
"Minimum number of lowercase letters that the user's password must have. The value 0 means the user's password does not require any lowercase letters.",
"Minimum number of lowercase letters [a-z] that the user's password must have. The value 0 means the user's password does not require any lowercase letters.",
true,
ConfigKey.Scope.Domain);
@ -61,7 +62,7 @@ public interface PasswordPolicy {
Integer.class,
"password.policy.minimum.digits",
"0",
"Minimum number of digits that the user's password must have. The value 0 means the user's password does not require any digits.",
"Minimum number of numeric characters [0-9] that the user's password must have. The value 0 means the user's password does not require any numeric characters.",
true,
ConfigKey.Scope.Domain);

View File

@ -58,7 +58,6 @@ import com.cloud.agent.api.GetUnmanagedInstancesAnswer;
import com.cloud.agent.api.GetUnmanagedInstancesCommand;
import com.cloud.agent.api.PrepareUnmanageVMInstanceAnswer;
import com.cloud.agent.api.PrepareUnmanageVMInstanceCommand;
import com.cloud.capacity.CapacityManager;
import com.cloud.configuration.Config;
import com.cloud.configuration.Resource;
import com.cloud.dc.DataCenter;
@ -121,6 +120,7 @@ import com.cloud.user.ResourceLimitService;
import com.cloud.user.UserVO;
import com.cloud.user.dao.UserDao;
import com.cloud.uservm.UserVm;
import com.cloud.utils.LogUtils;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.net.NetUtils;
@ -138,7 +138,6 @@ import com.cloud.vm.VmDetailConstants;
import com.cloud.vm.dao.NicDao;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.VMInstanceDao;
import com.cloud.vm.snapshot.dao.VMSnapshotDao;
import com.google.gson.Gson;
public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
@ -186,8 +185,6 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
@Inject
private VMInstanceDao vmDao;
@Inject
private CapacityManager capacityManager;
@Inject
private VolumeApiService volumeApiService;
@Inject
private DeploymentPlanningManager deploymentPlanningManager;
@ -206,8 +203,6 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
@Inject
private GuestOSHypervisorDao guestOSHypervisorDao;
@Inject
private VMSnapshotDao vmSnapshotDao;
@Inject
private SnapshotDao snapshotDao;
@Inject
private UserVmDao userVmDao;
@ -335,7 +330,6 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
String[] split = path.split(" ");
path = split[split.length - 1];
split = path.split("/");
;
path = split[split.length - 1];
split = path.split("\\.");
path = split[0];
@ -387,26 +381,29 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
private ServiceOfferingVO getUnmanagedInstanceServiceOffering(final UnmanagedInstanceTO instance, ServiceOfferingVO serviceOffering, final Account owner, final DataCenter zone, final Map<String, String> details)
throws ServerApiException, PermissionDeniedException, ResourceAllocationException {
if (instance == null) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM is not valid"));
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Cannot find VM to import.");
}
if (serviceOffering == null) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Service offering is not valid"));
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Cannot find service offering used to import VM [%s].", instance.getName()));
}
accountService.checkAccess(owner, serviceOffering, zone);
final Integer cpu = instance.getCpuCores();
final Integer memory = instance.getMemory();
Integer cpuSpeed = instance.getCpuSpeed() == null ? 0 : instance.getCpuSpeed();
if (cpu == null || cpu == 0) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("CPU cores for VM (%s) not valid", instance.getName()));
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("CPU cores [%s] is not valid for importing VM [%s].", cpu, instance.getName()));
}
if (memory == null || memory == 0) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Memory for VM (%s) not valid", instance.getName()));
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Memory [%s] is not valid for importing VM [%s].", memory, instance.getName()));
}
if (serviceOffering.isDynamic()) {
if (details.containsKey(VmDetailConstants.CPU_SPEED)) {
try {
cpuSpeed = Integer.parseInt(details.get(VmDetailConstants.CPU_SPEED));
} catch (Exception e) {
LOGGER.error(String.format("Failed to get CPU speed for importing VM [%s] due to [%s].", instance.getName(), e.getMessage()), e);
}
}
Map<String, String> parameters = new HashMap<>();
@ -429,8 +426,8 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Service offering (%s) %dMHz CPU speed does not match VM CPU speed %dMHz and VM is not in powered off state (Power state: %s)", serviceOffering.getUuid(), serviceOffering.getSpeed(), cpuSpeed, instance.getPowerState()));
}
}
resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.cpu, new Long(serviceOffering.getCpu()));
resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.memory, new Long(serviceOffering.getRamSize()));
resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.cpu, Long.valueOf(serviceOffering.getCpu()));
resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.memory, Long.valueOf(serviceOffering.getRamSize()));
return serviceOffering;
}
@ -520,10 +517,10 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
return new Pair<>(rootDisk, dataDisks);
}
private void checkUnmanagedDiskAndOfferingForImport(UnmanagedInstanceTO.Disk disk, DiskOffering diskOffering, ServiceOffering serviceOffering, final Account owner, final DataCenter zone, final Cluster cluster, final boolean migrateAllowed)
private void checkUnmanagedDiskAndOfferingForImport(String instanceName, UnmanagedInstanceTO.Disk disk, DiskOffering diskOffering, ServiceOffering serviceOffering, final Account owner, final DataCenter zone, final Cluster cluster, final boolean migrateAllowed)
throws ServerApiException, PermissionDeniedException, ResourceAllocationException {
if (serviceOffering == null && diskOffering == null) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Disk offering for disk ID: %s not found during VM import", disk.getDiskId()));
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Disk offering for disk ID [%s] not found during VM [%s] import.", disk.getDiskId(), instanceName));
}
if (diskOffering != null) {
accountService.checkAccess(owner, diskOffering, zone);
@ -544,15 +541,15 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
}
}
private void checkUnmanagedDiskAndOfferingForImport(List<UnmanagedInstanceTO.Disk> disks, final Map<String, Long> diskOfferingMap, final Account owner, final DataCenter zone, final Cluster cluster, final boolean migrateAllowed)
private void checkUnmanagedDiskAndOfferingForImport(String intanceName, List<UnmanagedInstanceTO.Disk> disks, final Map<String, Long> diskOfferingMap, final Account owner, final DataCenter zone, final Cluster cluster, final boolean migrateAllowed)
throws ServerApiException, PermissionDeniedException, ResourceAllocationException {
String diskController = null;
for (UnmanagedInstanceTO.Disk disk : disks) {
if (disk == null) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Unable to retrieve disk details for VM"));
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Unable to retrieve disk details for VM [%s].", intanceName));
}
if (!diskOfferingMap.containsKey(disk.getDiskId())) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Disk offering for disk ID: %s not found during VM import", disk.getDiskId()));
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Disk offering for disk ID [%s] not found during VM import.", disk.getDiskId()));
}
if (StringUtils.isEmpty(diskController)) {
diskController = disk.getController();
@ -561,17 +558,12 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Multiple data disk controllers of different type (%s, %s) are not supported for import. Please make sure that all data disk controllers are of the same type", diskController, disk.getController()));
}
}
checkUnmanagedDiskAndOfferingForImport(disk, diskOfferingDao.findById(diskOfferingMap.get(disk.getDiskId())), null, owner, zone, cluster, migrateAllowed);
checkUnmanagedDiskAndOfferingForImport(intanceName, disk, diskOfferingDao.findById(diskOfferingMap.get(disk.getDiskId())), null, owner, zone, cluster, migrateAllowed);
}
}
private void checkUnmanagedNicAndNetworkForImport(UnmanagedInstanceTO.Nic nic, Network network, final DataCenter zone, final Account owner, final boolean autoAssign) throws ServerApiException {
if (nic == null) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Unable to retrieve NIC details during VM import"));
}
if (network == null) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Network for nic ID: %s not found during VM import", nic.getNicId()));
}
private void checkUnmanagedNicAndNetworkForImport(String instanceName, UnmanagedInstanceTO.Nic nic, Network network, final DataCenter zone, final Account owner, final boolean autoAssign) throws ServerApiException {
basicNetworkChecks(instanceName, nic, network);
if (network.getDataCenterId() != zone.getId()) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Network(ID: %s) for nic(ID: %s) belongs to a different zone than VM to be imported", network.getUuid(), nic.getNicId()));
}
@ -588,34 +580,31 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
}
String pvLanType = nic.getPvlanType() == null ? "" : nic.getPvlanType().toLowerCase().substring(0, 1);
if (nic.getVlan() != null && nic.getVlan() != 0 && nic.getPvlan() != null && nic.getPvlan() != 0 &&
(StringUtils.isEmpty(network.getBroadcastUri().toString()) ||
!networkBroadcastUri.equals(String.format("pvlan://%d-%s%d", nic.getVlan(), pvLanType, nic.getPvlan())))) {
(StringUtils.isEmpty(networkBroadcastUri) || !String.format("pvlan://%d-%s%d", nic.getVlan(), pvLanType, nic.getPvlan()).equals(networkBroadcastUri))) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("PVLAN of network(ID: %s) %s is found different from the VLAN of nic(ID: %s) pvlan://%d-%s%d during VM import", network.getUuid(), networkBroadcastUri, nic.getNicId(), nic.getVlan(), pvLanType, nic.getPvlan()));
}
}
private void checkUnmanagedNicAndNetworkHostnameForImport(UnmanagedInstanceTO.Nic nic, Network network, final String hostName) throws ServerApiException {
private void basicNetworkChecks(String instanceName, UnmanagedInstanceTO.Nic nic, Network network) {
if (nic == null) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Unable to retrieve NIC details during VM import"));
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Unable to retrieve the NIC details used by VM [%s] from VMware. Please check if this VM have NICs in VMWare.", instanceName));
}
if (network == null) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Network for nic ID: %s not found during VM import", nic.getNicId()));
}
// Check for duplicate hostname in network, get all vms hostNames in the network
List<String> hostNames = vmDao.listDistinctHostNames(network.getId());
if (CollectionUtils.isNotEmpty(hostNames) && hostNames.contains(hostName)) {
throw new InvalidParameterValueException("The vm with hostName " + hostName + " already exists in the network domain: " + network.getNetworkDomain() + "; network="
+ network);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Network for nic ID: %s not found during VM import.", nic.getNicId()));
}
}
private void checkUnmanagedNicIpAndNetworkForImport(UnmanagedInstanceTO.Nic nic, Network network, final Network.IpAddresses ipAddresses) throws ServerApiException {
if (nic == null) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Unable to retrieve NIC details during VM import"));
}
if (network == null) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Network for nic ID: %s not found during VM import", nic.getNicId()));
private void checkUnmanagedNicAndNetworkHostnameForImport(String instanceName, UnmanagedInstanceTO.Nic nic, Network network, final String hostName) throws ServerApiException {
basicNetworkChecks(instanceName, nic, network);
// Check for duplicate hostname in network, get all vms hostNames in the network
List<String> hostNames = vmDao.listDistinctHostNames(network.getId());
if (CollectionUtils.isNotEmpty(hostNames) && hostNames.contains(hostName)) {
throw new InvalidParameterValueException(String.format("VM with Name [%s] already exists in the network [%s] domain [%s]. Cannot import another VM with the same name. Pleasy try again with a different name.", hostName, network, network.getNetworkDomain()));
}
}
private void checkUnmanagedNicIpAndNetworkForImport(String instanceName, UnmanagedInstanceTO.Nic nic, Network network, final Network.IpAddresses ipAddresses) throws ServerApiException {
basicNetworkChecks(instanceName, nic, network);
// Check IP is assigned for non L2 networks
if (!network.getGuestType().equals(Network.GuestType.L2) && (ipAddresses == null || StringUtils.isEmpty(ipAddresses.getIp4Address()))) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("NIC(ID: %s) needs a valid IP address for it to be associated with network(ID: %s). %s parameter of API can be used for this", nic.getNicId(), network.getUuid(), ApiConstants.NIC_IP_ADDRESS_LIST));
@ -629,7 +618,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
}
}
private Map<String, Long> getUnmanagedNicNetworkMap(List<UnmanagedInstanceTO.Nic> nics, final Map<String, Long> callerNicNetworkMap, final Map<String, Network.IpAddresses> callerNicIpAddressMap, final DataCenter zone, final String hostName, final Account owner) throws ServerApiException {
private Map<String, Long> getUnmanagedNicNetworkMap(String instanceName, List<UnmanagedInstanceTO.Nic> nics, final Map<String, Long> callerNicNetworkMap, final Map<String, Network.IpAddresses> callerNicIpAddressMap, final DataCenter zone, final String hostName, final Account owner) throws ServerApiException {
Map<String, Long> nicNetworkMap = new HashMap<>();
String nicAdapter = null;
for (UnmanagedInstanceTO.Nic nic : nics) {
@ -654,22 +643,23 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
continue;
}
try {
checkUnmanagedNicAndNetworkForImport(nic, networkVO, zone, owner, true);
checkUnmanagedNicAndNetworkForImport(instanceName, nic, networkVO, zone, owner, true);
network = networkVO;
} catch (Exception e) {
LOGGER.error(String.format("Error when checking NIC [%s] of unmanaged instance to import due to [%s]." , nic.getNicId(), e.getMessage()), e);
}
if (network != null) {
checkUnmanagedNicAndNetworkHostnameForImport(nic, network, hostName);
checkUnmanagedNicIpAndNetworkForImport(nic, network, ipAddresses);
checkUnmanagedNicAndNetworkHostnameForImport(instanceName, nic, network, hostName);
checkUnmanagedNicIpAndNetworkForImport(instanceName, nic, network, ipAddresses);
break;
}
}
}
} else {
network = networkDao.findById(callerNicNetworkMap.get(nic.getNicId()));
checkUnmanagedNicAndNetworkForImport(nic, network, zone, owner, false);
checkUnmanagedNicAndNetworkHostnameForImport(nic, network, hostName);
checkUnmanagedNicIpAndNetworkForImport(nic, network, ipAddresses);
checkUnmanagedNicAndNetworkForImport(instanceName, nic, network, zone, owner, false);
checkUnmanagedNicAndNetworkHostnameForImport(instanceName, nic, network, hostName);
checkUnmanagedNicIpAndNetworkForImport(instanceName, nic, network, ipAddresses);
}
if (network == null) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Suitable network for nic(ID: %s) not found during VM import", nic.getNicId()));
@ -745,14 +735,10 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
try {
dest = deploymentPlanningManager.planDeployment(profile, plan, excludeList, null);
} catch (Exception e) {
LOGGER.warn(String.format("VM import failed for unmanaged vm: %s during vm migration, finding deployment destination", vm.getInstanceName()), e);
String errorMsg = String.format("VM import failed for Unmanaged VM [%s] during VM migration, cannot find deployment destination due to [%s].", vm.getInstanceName(), e.getMessage());
LOGGER.warn(errorMsg, e);
cleanupFailedImportVM(vm);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm: %s during vm migration, finding deployment destination", vm.getInstanceName()));
}
if (dest != null) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(" Found " + dest + " for migrating the vm to");
}
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMsg);
}
if (dest == null) {
cleanupFailedImportVM(vm);
@ -769,9 +755,10 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
}
vm = userVmManager.getUserVm(vm.getId());
} catch (Exception e) {
LOGGER.error(String.format("VM import failed for unmanaged vm: %s during vm migration", vm.getInstanceName()), e);
String errorMsg = String.format("VM import failed for Unmanaged VM [%s] during VM migration due to [%s].", vm.getInstanceName(), e.getMessage());
LOGGER.error(errorMsg, e);
cleanupFailedImportVM(vm);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm: %s during vm migration. %s", userVm.getInstanceName(), e.getMessage()));
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMsg);
}
}
for (Pair<DiskProfile, StoragePool> diskProfileStoragePool : diskProfileStoragePoolList) {
@ -857,9 +844,9 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
private void publishVMUsageUpdateResourceCount(final UserVm userVm, ServiceOfferingVO serviceOfferingVO) {
if (userVm == null || serviceOfferingVO == null) {
LOGGER.error("Failed to publish usage records during VM import");
LOGGER.error(String.format("Failed to publish usage records during VM import because VM [%s] or ServiceOffering [%s] is null.", userVm, serviceOfferingVO));
cleanupFailedImportVM(userVm);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm during publishing usage records"));
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "VM import failed for Unmanaged VM during publishing Usage Records.");
}
try {
if (!serviceOfferingVO.isDynamic()) {
@ -874,13 +861,13 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
userVm.getHypervisorType().toString(), VirtualMachine.class.getName(), userVm.getUuid(), userVm.isDisplayVm());
}
} catch (Exception e) {
LOGGER.error(String.format("Failed to publish usage records during VM import for unmanaged vm %s", userVm.getInstanceName()), e);
LOGGER.error(String.format("Failed to publish usage records during VM import for unmanaged VM [%s] due to [%s].", userVm.getInstanceName(), e.getMessage()), e);
cleanupFailedImportVM(userVm);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm %s during publishing usage records", userVm.getInstanceName()));
}
resourceLimitService.incrementResourceCount(userVm.getAccountId(), Resource.ResourceType.user_vm, userVm.isDisplayVm());
resourceLimitService.incrementResourceCount(userVm.getAccountId(), Resource.ResourceType.cpu, userVm.isDisplayVm(), new Long(serviceOfferingVO.getCpu()));
resourceLimitService.incrementResourceCount(userVm.getAccountId(), Resource.ResourceType.memory, userVm.isDisplayVm(), new Long(serviceOfferingVO.getRamSize()));
resourceLimitService.incrementResourceCount(userVm.getAccountId(), Resource.ResourceType.cpu, userVm.isDisplayVm(), Long.valueOf(serviceOfferingVO.getCpu()));
resourceLimitService.incrementResourceCount(userVm.getAccountId(), Resource.ResourceType.memory, userVm.isDisplayVm(), Long.valueOf(serviceOfferingVO.getRamSize()));
// Save usage event and update resource count for user vm volumes
List<VolumeVO> volumes = volumeDao.findByInstance(userVm.getId());
for (VolumeVO volume : volumes) {
@ -911,14 +898,17 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
final ServiceOfferingVO serviceOffering, final Map<String, Long> dataDiskOfferingMap,
final Map<String, Long> nicNetworkMap, final Map<String, Network.IpAddresses> callerNicIpAddressMap,
final Map<String, String> details, final boolean migrateAllowed, final boolean forced) {
LOGGER.debug(LogUtils.logGsonWithoutException("Trying to import VM [%s] with name [%s], in zone [%s], cluster [%s], and host [%s], using template [%s], service offering [%s], disks map [%s], NICs map [%s] and details [%s].",
unmanagedInstance, instanceName, zone, cluster, host, template, serviceOffering, dataDiskOfferingMap, nicNetworkMap, details));
UserVm userVm = null;
ServiceOfferingVO validatedServiceOffering = null;
try {
validatedServiceOffering = getUnmanagedInstanceServiceOffering(unmanagedInstance, serviceOffering, owner, zone, details);
} catch (Exception e) {
LOGGER.error("Service offering for VM import not compatible", e);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to import VM: %s. %s", unmanagedInstance.getName(), StringUtils.defaultString(e.getMessage())));
String errorMsg = String.format("Failed to import Unmanaged VM [%s] because the service offering [%s] is not compatible due to [%s].", unmanagedInstance.getName(), serviceOffering.getUuid(), StringUtils.defaultIfEmpty(e.getMessage(), ""));
LOGGER.error(errorMsg, e);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMsg);
}
String internalCSName = unmanagedInstance.getInternalCSName();
@ -950,9 +940,9 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
}
allDetails.put(VmDetailConstants.ROOT_DISK_CONTROLLER, rootDisk.getController());
try {
checkUnmanagedDiskAndOfferingForImport(rootDisk, null, validatedServiceOffering, owner, zone, cluster, migrateAllowed);
checkUnmanagedDiskAndOfferingForImport(unmanagedInstance.getName(), rootDisk, null, validatedServiceOffering, owner, zone, cluster, migrateAllowed);
if (CollectionUtils.isNotEmpty(dataDisks)) { // Data disk(s) present
checkUnmanagedDiskAndOfferingForImport(dataDisks, dataDiskOfferingMap, owner, zone, cluster, migrateAllowed);
checkUnmanagedDiskAndOfferingForImport(unmanagedInstance.getName(), dataDisks, dataDiskOfferingMap, owner, zone, cluster, migrateAllowed);
allDetails.put(VmDetailConstants.DATA_DISK_CONTROLLER, dataDisks.get(0).getController());
}
resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.volume, unmanagedInstanceDisks.size());
@ -962,7 +952,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
}
// Check NICs and supplied networks
Map<String, Network.IpAddresses> nicIpAddressMap = getNicIpAddresses(unmanagedInstance.getNics(), callerNicIpAddressMap);
Map<String, Long> allNicNetworkMap = getUnmanagedNicNetworkMap(unmanagedInstance.getNics(), nicNetworkMap, nicIpAddressMap, zone, hostName, owner);
Map<String, Long> allNicNetworkMap = getUnmanagedNicNetworkMap(unmanagedInstance.getName(), unmanagedInstance.getNics(), nicNetworkMap, nicIpAddressMap, zone, hostName, owner);
if (!CollectionUtils.isEmpty(unmanagedInstance.getNics())) {
allDetails.put(VmDetailConstants.NIC_ADAPTER, unmanagedInstance.getNics().get(0).getAdapterType());
}
@ -976,8 +966,9 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
validatedServiceOffering, null, hostName,
cluster.getHypervisorType(), allDetails, powerState);
} catch (InsufficientCapacityException ice) {
LOGGER.error(String.format("Failed to import vm name: %s", instanceName), ice);
throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ice.getMessage());
String errorMsg = String.format("Failed to import VM [%s] due to [%s].", instanceName, ice.getMessage());
LOGGER.error(errorMsg, ice);
throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, errorMsg);
}
if (userVm == null) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to import vm name: %s", instanceName));
@ -1035,23 +1026,29 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
return userVm;
}
@Override
public ListResponse<UnmanagedInstanceResponse> listUnmanagedInstances(ListUnmanagedInstancesCmd cmd) {
private Cluster basicAccessChecks(Long clusterId) {
final Account caller = CallContext.current().getCallingAccount();
if (caller.getType() != Account.Type.ADMIN) {
throw new PermissionDeniedException(String.format("Cannot perform this operation, Calling account is not root admin: %s", caller.getUuid()));
throw new PermissionDeniedException(String.format("Cannot perform this operation, caller account [%s] is not ROOT Admin.", caller.getUuid()));
}
final Long clusterId = cmd.getClusterId();
if (clusterId == null) {
throw new InvalidParameterValueException(String.format("Cluster ID cannot be null"));
throw new InvalidParameterValueException("Cluster ID cannot be null.");
}
final Cluster cluster = clusterDao.findById(clusterId);
if (cluster == null) {
throw new InvalidParameterValueException(String.format("Cluster ID: %d cannot be found", clusterId));
throw new InvalidParameterValueException(String.format("Cluster with ID [%d] cannot be found.", clusterId));
}
if (cluster.getHypervisorType() != Hypervisor.HypervisorType.VMware) {
throw new InvalidParameterValueException(String.format("VM ingestion is currently not supported for hypervisor: %s", cluster.getHypervisorType().toString()));
throw new InvalidParameterValueException(String.format("VM import is currently not supported for hypervisor [%s].", cluster.getHypervisorType().toString()));
}
return cluster;
}
@Override
public ListResponse<UnmanagedInstanceResponse> listUnmanagedInstances(ListUnmanagedInstancesCmd cmd) {
Long clusterId = cmd.getClusterId();
Cluster cluster = basicAccessChecks(clusterId);
String keyword = cmd.getKeyword();
if (StringUtils.isNotEmpty(keyword)) {
keyword = keyword.toLowerCase();
@ -1093,25 +1090,13 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
@Override
public UserVmResponse importUnmanagedInstance(ImportUnmanagedInstanceCmd cmd) {
final Account caller = CallContext.current().getCallingAccount();
if (caller.getType() != Account.Type.ADMIN) {
throw new PermissionDeniedException(String.format("Cannot perform this operation, Calling account is not root admin: %s", caller.getUuid()));
}
final Long clusterId = cmd.getClusterId();
if (clusterId == null) {
throw new InvalidParameterValueException(String.format("Cluster ID cannot be null"));
}
final Cluster cluster = clusterDao.findById(clusterId);
if (cluster == null) {
throw new InvalidParameterValueException(String.format("Cluster ID: %d cannot be found", clusterId));
}
if (cluster.getHypervisorType() != Hypervisor.HypervisorType.VMware) {
throw new InvalidParameterValueException(String.format("VM import is currently not supported for hypervisor: %s", cluster.getHypervisorType().toString()));
}
Long clusterId = cmd.getClusterId();
Cluster cluster = basicAccessChecks(clusterId);
final DataCenter zone = dataCenterDao.findById(cluster.getDataCenterId());
final String instanceName = cmd.getName();
if (StringUtils.isEmpty(instanceName)) {
throw new InvalidParameterValueException(String.format("Instance name cannot be empty"));
throw new InvalidParameterValueException("Instance name cannot be empty");
}
if (cmd.getDomainId() != null && StringUtils.isEmpty(cmd.getAccountName())) {
throw new InvalidParameterValueException("domainid parameter must be specified with account parameter");
@ -1140,7 +1125,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
}
final Long serviceOfferingId = cmd.getServiceOfferingId();
if (serviceOfferingId == null) {
throw new InvalidParameterValueException(String.format("Service offering ID cannot be null"));
throw new InvalidParameterValueException("Service offering ID cannot be null");
}
final ServiceOfferingVO serviceOffering = serviceOfferingDao.findById(serviceOfferingId);
if (serviceOffering == null) {
@ -1160,7 +1145,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
String hostName = cmd.getHostName();
if (StringUtils.isEmpty(hostName)) {
if (!NetUtils.verifyDomainNameLabel(instanceName, true)) {
throw new InvalidParameterValueException(String.format("Please provide hostname for the VM. VM name contains unsupported characters for it to be used as hostname"));
throw new InvalidParameterValueException("Please provide a valid hostname for the VM. VM name contains unsupported characters that cannot be used as hostname.");
}
hostName = instanceName;
}
@ -1232,7 +1217,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
template.setGuestOSId(guestOSHypervisor.getGuestOsId());
}
userVm = importVirtualMachineInternal(unmanagedInstance, instanceName, zone, cluster, host,
template, displayName, hostName, caller, owner, userId,
template, displayName, hostName, CallContext.current().getCallingAccount(), owner, userId,
serviceOffering, dataDiskOfferingMap,
nicNetworkMap, nicIpAddressMap,
details, cmd.getMigrateAllowed(), forced);
@ -1316,8 +1301,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
}
if (hostId == null) {
throw new CloudRuntimeException("Cannot find a host to verify if the VM to unmanage " +
"with id = " + vmVO.getUuid() + " exists.");
throw new CloudRuntimeException(String.format("Cannot find a host to verify if the VM [%s] exists. Thus we are unable to unmanage it.", vmVO.getUuid()));
}
return hostId;
}

View File

@ -159,6 +159,7 @@ import com.cloud.storage.template.TemplateProp;
import com.cloud.storage.template.VhdProcessor;
import com.cloud.storage.template.VmdkProcessor;
import com.cloud.utils.EncryptionUtil;
import com.cloud.utils.LogUtils;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.Pair;
import com.cloud.utils.SwiftUtil;
@ -272,6 +273,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
@Override
public Answer executeRequest(Command cmd) {
s_logger.debug(LogUtils.logGsonWithoutException("Executing command %s [%s].", cmd.getClass().getSimpleName(), cmd));
if (cmd instanceof DownloadProgressCommand) {
return _dlMgr.handleDownloadCommand(this, (DownloadProgressCommand)cmd);
} else if (cmd instanceof DownloadCommand) {
@ -406,13 +408,17 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
NfsTO nfsImageStore = (NfsTO)srcStore;
String secondaryStorageUrl = nfsImageStore.getUrl();
assert (secondaryStorageUrl != null);
String templateUrl = secondaryStorageUrl + File.separator + srcData.getPath();
String templateDetails = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(template, "uuid", "path", "name");
s_logger.debug(String.format("Trying to get disks of template [%s], using path [%s].", templateDetails, templateUrl));
Pair<String, String> templateInfo = decodeTemplateRelativePathAndNameFromUrl(secondaryStorageUrl, templateUrl, template.getName());
String templateRelativeFolderPath = templateInfo.first();
try {
String secondaryMountPoint = getRootDir(secondaryStorageUrl, _nfsVersion);
s_logger.info("MDOVE Secondary storage mount point: " + secondaryMountPoint);
s_logger.info(String.format("Trying to find template [%s] in secondary storage root mount point [%s].", templateDetails, secondaryMountPoint));
String srcOVAFileName = getTemplateOnSecStorageFilePath(secondaryMountPoint, templateRelativeFolderPath, templateInfo.second(), ImageFormat.OVA.getFileExtension());
@ -423,39 +429,46 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
command.add("--no-same-permissions");
command.add("-xf", srcOVAFileName);
command.setWorkDir(secondaryMountPoint + File.separator + templateRelativeFolderPath);
s_logger.info("Executing command: " + command.toString());
s_logger.info(String.format("Trying to decompress OVA file [%s] using command [%s].", srcOVAFileName, command.toString()));
String result = command.execute();
if (result != null) {
String msg = "Unable to unpack snapshot OVA file at: " + srcOVAFileName;
String msg = String.format("Unable to unpack snapshot OVA file [%s] due to [%s].", srcOVAFileName, result);
s_logger.error(msg);
throw new Exception(msg);
}
String directory = secondaryMountPoint + File.separator + templateRelativeFolderPath;
command = new Script("chmod", 0, s_logger);
command.add("-R");
command.add("666", secondaryMountPoint + File.separator + templateRelativeFolderPath);
command.add("666", directory);
s_logger.debug(String.format("Trying to add, recursivelly, permission 666 to directory [%s] using command [%s].", directory, command.toString()));
result = command.execute();
if (result != null) {
s_logger.warn("Unable to set permissions for " + secondaryMountPoint + File.separator + templateRelativeFolderPath + " due to " + result);
s_logger.warn(String.format("Unable to set permissions 666 for directory [%s] due to [%s].", directory, result));
}
}
Script command = new Script("cp", _timeout, s_logger);
command.add(ovfFilePath);
command.add(ovfFilePath + ORIGINAL_FILE_EXTENSION);
s_logger.debug(String.format("Trying to copy file from [%s] to [%s] using command [%s].", ovfFilePath, ovfFilePath + ORIGINAL_FILE_EXTENSION, command.toString()));
String result = command.execute();
if (result != null) {
String msg = "Unable to rename original OVF, error msg: " + result;
String msg = String.format("Unable to copy original OVF file [%s] to [%s] due to [%s].", ovfFilePath, ovfFilePath + ORIGINAL_FILE_EXTENSION, result);
s_logger.error(msg);
}
s_logger.debug("Reading OVF " + ovfFilePath + " to retrive the number of disks present in OVA");
s_logger.debug(String.format("Reading OVF file [%s] to retrive the number of disks present in OVA file.", ovfFilePath));
OVFHelper ovfHelper = new OVFHelper();
List<DatadiskTO> disks = ovfHelper.getOVFVolumeInfoFromFile(ovfFilePath, configurationId);
s_logger.debug(LogUtils.logGsonWithoutException("Found %s disks reading OVF file [%s] and using configuration id [%s]. The disks specifications are [%s].",
disks.size(), ovfFilePath, configurationId, disks));
return new GetDatadisksAnswer(disks);
} catch (Exception e) {
String msg = "Get Datadisk Template Count failed due to " + e.getMessage();
String msg = String.format("Failed to get disks from template [%s] due to [%s].", templateDetails, e.getMessage());
s_logger.error(msg, e);
return new GetDatadisksAnswer(msg);
}
@ -584,7 +597,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
* Template url may or may not end with .ova extension
*/
public static Pair<String, String> decodeTemplateRelativePathAndNameFromUrl(String storeUrl, String templateUrl, String defaultName) {
s_logger.debug(String.format("Trying to get template relative path and name from URL [%s].", templateUrl));
String templateName = null;
String mountPoint = null;
if (templateUrl.endsWith(".ova")) {
@ -598,6 +611,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
templateName = templateUrl.substring(index + 1).replace(".ova", "");
if (templateName == null || templateName.isEmpty()) {
s_logger.debug(String.format("Cannot find template name from URL [%s]. Using default name [%s].", templateUrl, defaultName));
templateName = defaultName;
}
} else {
@ -608,11 +622,13 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
templateName = defaultName;
}
s_logger.debug(String.format("Template relative path [%s] and name [%s] found from URL [%s].", mountPoint, templateName, templateUrl));
return new Pair<String, String>(mountPoint, templateName);
}
public static String getTemplateOnSecStorageFilePath(String secStorageMountPoint, String templateRelativeFolderPath, String templateName, String fileExtension) {
s_logger.debug(String.format("Trying to find template [%s] with file extension [%s] in secondary storage mount point [%s] using relative folder path [%s].",
templateName, fileExtension, secStorageMountPoint, templateRelativeFolderPath));
StringBuffer sb = new StringBuffer();
sb.append(secStorageMountPoint);
if (!secStorageMountPoint.endsWith("/")) {
@ -699,17 +715,27 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
}
private String getOVFFilePath(String srcOVAFileName) {
s_logger.debug(String.format("Trying to get OVF file from OVA path [%s].", srcOVAFileName));
File file = new File(srcOVAFileName);
assert (_storage != null);
String[] files = _storage.listFiles(file.getParent());
if (files != null) {
for (String fileName : files) {
if (fileName.toLowerCase().endsWith(".ovf")) {
File ovfFile = new File(fileName);
return file.getParent() + File.separator + ovfFile.getName();
}
if (files == null) {
s_logger.warn(String.format("Cannot find any files in parent directory [%s] of OVA file [%s].", file.getParent(), srcOVAFileName));
return null;
}
s_logger.debug(String.format("Found [%s] files in parent directory of OVA file [%s]. Files found are [%s].", files.length + 1, file.getParent(), StringUtils.join(files, ", ")));
for (String fileName : files) {
if (fileName.toLowerCase().endsWith(".ovf")) {
File ovfFile = new File(fileName);
String ovfFilePath = file.getParent() + File.separator + ovfFile.getName();
s_logger.debug(String.format("Found OVF file [%s] from OVA file [%s].", ovfFilePath, srcOVAFileName));
return ovfFilePath;
}
}
s_logger.warn(String.format("Cannot find any OVF file in parent directory [%s] of OVA file [%s].", file.getParent(), srcOVAFileName));
return null;
}
@ -2050,6 +2076,13 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
s_logger.warn(details);
return new Answer(cmd, false, details);
}
// delete the directory if it is empty
if (snapshotDir.isDirectory() && snapshotDir.list().length == 0 && !snapshotDir.delete()) {
details = String.format("Unable to delete directory [%s] at path [%s].", snapshotDir.getName(), snapshotPath);
s_logger.debug(details);
return new Answer(cmd, false, details);
}
return new Answer(cmd, true, null);
} else if (dstore instanceof S3TO) {
final S3TO s3 = (S3TO)dstore;
@ -2616,13 +2649,13 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
return _parent;
}
try {
s_logger.debug(String.format("Trying to get root directory from secondary storage URL [%s] using NFS version [%s].", secUrl, nfsVersion));
URI uri = new URI(secUrl);
String dir = mountUri(uri, nfsVersion);
return _parent + "/" + dir;
} catch (Exception e) {
String msg = "GetRootDir for " + secUrl + " failed due to " + e.toString();
s_logger.error(msg, e);
throw new CloudRuntimeException(msg);
String msg = String.format("Failed to get root directory from secondary storage URL [%s], using NFS version [%s], due to [%s].", secUrl, nfsVersion, e.getMessage());
throw new CloudRuntimeException(msg, e);
}
}

View File

@ -668,8 +668,12 @@ class CsIP:
logging.info("Not making dns publicly available")
if self.config.has_metadata():
app = CsApache(self)
app.setup()
if method == "add":
app = CsApache(self)
app.setup()
elif method == "delete":
app = CsApache(self)
app.remove()
# If redundant then this is dealt with
# by the primary backup functions

View File

@ -34,7 +34,7 @@ class CsApache(CsApp):
""" Set up Apache """
def remove(self):
file = "/etc/apache2/sites-enabled/vhost-%s.conf" % self.dev
file = "/etc/apache2/sites-enabled/vhost-%s.conf" % self.ip
if os.path.isfile(file):
os.remove(file)
CsHelper.service("apache2", "restart")

View File

@ -121,7 +121,7 @@ class TestSSVMs(cloudstackTestCase):
# should return only ONE SSVM per zone
# 2. The returned SSVM should be in Running state
# 3. listSystemVM for secondarystoragevm should list publicip,
# privateip and link-localip
# privateip, link-localip and service offering id/name
# 4. The gateway programmed on the ssvm by listSystemVm should be
# the same as the gateway returned by listVlanIpRanges
# 5. DNS entries must match those given for the zone
@ -188,6 +188,18 @@ class TestSSVMs(cloudstackTestCase):
"Check whether SSVM has public IP field"
)
self.assertEqual(
hasattr(ssvm, 'serviceofferingid'),
True,
"Check whether SSVM has service offering id field"
)
self.assertEqual(
hasattr(ssvm, 'serviceofferingname'),
True,
"Check whether SSVM has service offering name field"
)
# Fetch corresponding ip ranges information from listVlanIpRanges
ipranges_response = list_vlan_ipranges(
self.apiclient,
@ -261,8 +273,8 @@ class TestSSVMs(cloudstackTestCase):
# 1. listSystemVM (systemvmtype=consoleproxy) should return
# at least ONE CPVM per zone
# 2. The returned ConsoleProxyVM should be in Running state
# 3. listSystemVM for console proxy should list publicip, privateip
# and link-localip
# 3. listSystemVM for console proxy should list publicip, privateip,
# link-localip and service offering id/name
# 4. The gateway programmed on the console proxy should be the same
# as the gateway returned by listZones
# 5. DNS entries must match those given for the zone
@ -327,6 +339,18 @@ class TestSSVMs(cloudstackTestCase):
True,
"Check whether CPVM has public IP field"
)
self.assertEqual(
hasattr(cpvm, 'serviceofferingid'),
True,
"Check whether CPVM has service offering id field"
)
self.assertEqual(
hasattr(cpvm, 'serviceofferingname'),
True,
"Check whether CPVM has service offering name field"
)
# Fetch corresponding ip ranges information from listVlanIpRanges
ipranges_response = list_vlan_ipranges(
self.apiclient,

View File

@ -517,6 +517,7 @@
"label.copy.clipboard": "Copy to clipboard",
"label.copy.consoleurl": "Copy console URL to clipboard",
"label.copyid": "Copy ID",
"label.copy.password": "Copy password",
"label.core": "Core",
"label.core.zone.type": "Core zone type",
"label.counter": "Counter",

View File

@ -199,7 +199,8 @@ export default {
created () {
this.menus = this.mainMenu.find((item) => item.path === '/').children
this.collapsed = !this.sidebarOpened
setInterval(this.checkShutdown, 5000)
const readyForShutdownPollingJob = setInterval(this.checkShutdown, 5000)
this.$store.commit('SET_READY_FOR_SHUTDOWN_POLLING_JOB', readyForShutdownPollingJob)
},
mounted () {
const layoutMode = this.$config.theme['@layout-mode'] || 'light'

View File

@ -523,7 +523,7 @@
<div class="resource-detail-item__label">{{ $t('label.serviceofferingname') }}</div>
<div class="resource-detail-item__details">
<cloud-outlined />
<router-link v-if="!isStatic && $route.meta.name === 'router'" :to="{ path: '/computeoffering/' + resource.serviceofferingid, query: { issystem: true } }">{{ resource.serviceofferingname || resource.serviceofferingid }} </router-link>
<router-link v-if="!isStatic && ($route.meta.name === 'router' || $route.meta.name === 'systemvm')" :to="{ path: '/systemoffering/' + resource.serviceofferingid}">{{ resource.serviceofferingname || resource.serviceofferingid }} </router-link>
<router-link v-else-if="$router.resolve('/computeoffering/' + resource.serviceofferingid).matched[0].redirect !== '/exception/404'" :to="{ path: '/computeoffering/' + resource.serviceofferingid }">{{ resource.serviceofferingname || resource.serviceofferingid }} </router-link>
<span v-else>{{ resource.serviceofferingname || resource.serviceofferingid }}</span>
</div>

View File

@ -370,7 +370,13 @@ export default {
message: 'message.action.instance.reset.password',
dataView: true,
show: (record) => { return ['Stopped'].includes(record.state) && record.passwordenabled },
response: (result) => { return result.virtualmachine && result.virtualmachine.password ? `The password of VM <b>${result.virtualmachine.displayname}</b> is <b>${result.virtualmachine.password}</b>` : null }
response: (result) => {
return {
message: result.virtualmachine && result.virtualmachine.password ? `The password of VM <b>${result.virtualmachine.displayname}</b> is <b>${result.virtualmachine.password}</b>` : null,
copybuttontext: result.virtualmachine.password ? 'label.copy.password' : null,
copytext: result.virtualmachine.password ? result.virtualmachine.password : null
}
}
},
{
api: 'resetSSHKeyForVirtualMachine',

View File

@ -50,7 +50,8 @@ const getters = {
twoFaIssuer: state => state.user.twoFaIssuer,
loginFlag: state => state.user.loginFlag,
allProjects: (state) => state.app.allProjects,
customHypervisorName: state => state.user.customHypervisorName
customHypervisorName: state => state.user.customHypervisorName,
readyForShutdownPollingJob: state => state.user.readyForShutdownPollingJob
}
export default getters

View File

@ -130,6 +130,9 @@ const app = {
},
SET_SHUTDOWN_TRIGGERED: (state, shutdownTriggered) => {
state.shutdownTriggered = shutdownTriggered
},
SET_READY_FOR_SHUTDOWN_POLLING_JOB: (state, readyForShutdownPollingJob) => {
state.readyForShutdownPollingJob = readyForShutdownPollingJob
}
},
actions: {
@ -192,6 +195,9 @@ const app = {
},
SetShutdownTriggered ({ commit }, bool) {
commit('SET_SHUTDOWN_TRIGGERED', bool)
},
SetReadyForShutdownPollingJob ({ commit }, job) {
commit('SET_READY_FOR_SHUTDOWN_POLLING_JOB', job)
}
}
}

View File

@ -65,7 +65,8 @@ const user = {
twoFaEnabled: false,
twoFaProvider: '',
twoFaIssuer: '',
customHypervisorName: 'Custom'
customHypervisorName: 'Custom',
readyForShutdownPollingJob: ''
},
mutations: {
@ -155,6 +156,9 @@ const user = {
},
SET_CUSTOM_HYPERVISOR_NAME (state, name) {
state.customHypervisorName = name
},
SET_READY_FOR_SHUTDOWN_POLLING_JOB: (state, job) => {
state.readyForShutdownPollingJob = job
}
},

View File

@ -444,7 +444,8 @@
</template>
<script>
import { ref, reactive, toRaw } from 'vue'
import { ref, reactive, toRaw, h } from 'vue'
import { Button } from 'ant-design-vue'
import { api } from '@/api'
import { mixinDevice } from '@/utils/mixin.js'
import { genericCompare } from '@/utils/sort.js'
@ -1301,13 +1302,30 @@ export default {
eventBus.emit('update-resource-state', { selectedItems: this.selectedItems, resource, state: 'success' })
}
if (action.response) {
const description = action.response(result.jobresult)
if (description) {
this.$notification.info({
message: this.$t(action.label),
description: (<span v-html={description}></span>),
duration: 0
})
const response = action.response(result.jobresult)
if (response) {
if (typeof response === 'object') {
this.$notification.info({
message: this.$t(action.label),
description: (<span v-html={response.message}></span>),
btn: () => h(
Button,
{
type: 'primary',
size: 'small',
onClick: () => this.copyToClipboard(response.copytext)
},
() => [this.$t(response.copybuttontext)]
),
duration: 0
})
} else {
this.$notification.info({
message: this.$t(action.label),
description: (<span v-html={response}></span>),
duration: 0
})
}
}
}
if ('successMethod' in action) {
@ -1903,6 +1921,14 @@ export default {
if (screenWidth <= 768) {
this.modalWidth = '450px'
}
},
copyToClipboard (txt) {
const parent = this
this.$copyText(txt, document.body, function (err) {
if (!err) {
parent.$message.success(parent.$t('label.copied.clipboard'))
}
})
}
}
}

View File

@ -189,6 +189,9 @@ export default {
}
this.initForm()
if (store.getters.logoutFlag) {
if (store.getters.readyForShutdownPollingJob !== '' || store.getters.readyForShutdownPollingJob !== undefined) {
clearInterval(store.getters.readyForShutdownPollingJob)
}
sourceToken.init()
this.fetchData()
} else {

View File

@ -840,7 +840,8 @@
</template>
<script>
import { ref, reactive, toRaw, nextTick } from 'vue'
import { ref, reactive, toRaw, nextTick, h } from 'vue'
import { Button } from 'ant-design-vue'
import { api } from '@/api'
import _ from 'lodash'
import { mixin, mixinDevice } from '@/utils/mixin.js'
@ -2191,6 +2192,15 @@ export default {
this.$notification.success({
message: password + ` ${this.$t('label.for')} ` + name,
description: vm.password,
btn: () => h(
Button,
{
type: 'primary',
size: 'small',
onClick: () => this.copyToClipboard(vm.password)
},
() => [this.$t('label.copy.password')]
),
duration: 0
})
}
@ -2690,6 +2700,14 @@ export default {
}
}
return networks
},
copyToClipboard (txt) {
const parent = this
this.$copyText(txt, document.body, function (err) {
if (!err) {
parent.$message.success(parent.$t('label.copied.clipboard'))
}
})
}
}
}

View File

@ -109,7 +109,7 @@
<tooltip-label :title="$t('label.domainid')" :tooltip="apiParams.domainid.description"/>
</template>
<a-select
:loading="domainLoading"
:loading="domain.loading"
v-model:value="form.domainid"
:placeholder="apiParams.domainid.description"
showSearch
@ -207,7 +207,7 @@ export default {
this.fetchTimeZone = debounce(this.fetchTimeZone, 800)
return {
loading: false,
domainLoading: false,
domain: { loading: false },
domainsList: [],
roleLoading: false,
roles: [],
@ -282,21 +282,28 @@ export default {
}
},
fetchDomains () {
this.domainLoading = true
api('listDomains', {
listAll: true,
showicon: true,
details: 'min'
}).then(response => {
this.domainsList = response.listdomainsresponse.domain || []
this.form.domain = this.domainsList[0].id || ''
}).catch(error => {
this.$notification.error({
message: `${this.$t('label.error')} ${error.response.status}`,
description: error.response.data.errorresponse.errortext
})
this.domain.loading = true
this.loadMore('listDomains', 1, this.domain)
},
loadMore (apiToCall, page, sema) {
console.log('sema.loading ' + sema.loading)
const params = {}
params.listAll = true
params.details = 'min'
params.pagesize = 100
params.page = page
var count
api(apiToCall, params).then(json => {
const listDomains = json.listdomainsresponse.domain
count = json.listdomainsresponse.count
this.domainsList = this.domainsList.concat(listDomains)
}).finally(() => {
this.domainLoading = false
if (count <= this.domainsList.length) {
sema.loading = false
} else {
this.loadMore(apiToCall, page + 1, sema)
}
this.form.domainid = 0
})
},
fetchRoles () {

View File

@ -77,7 +77,7 @@
:filterOption="(input, option) => {
return option.label.toLowerCase().indexOf(input.toLowerCase()) >= 0
}"
:loading="domainLoading"
:loading="domain.loading"
:placeholder="apiParams.domainid.description"
@change="val => { handleDomainChange(domains[val]) }">
<a-select-option v-for="(opt, optIndex) in domains" :key="optIndex" :label="opt.path || opt.name || opt.description">
@ -375,7 +375,7 @@ export default {
return {
actionLoading: false,
domains: [],
domainLoading: false,
domain: { loading: false },
selectedDomain: {},
accountVisible: isAdminOrDomainAdmin(),
accounts: [],
@ -497,15 +497,26 @@ export default {
this.updateVPCCheckAndFetchNetworkOfferingData()
},
fetchDomainData () {
this.domain.loading = true
this.loadMore('listDomains', 1, this.domain)
},
loadMore (apiToCall, page, sema) {
const params = {}
params.listAll = true
params.details = 'min'
this.domainLoading = true
api('listDomains', params).then(json => {
params.pagesize = 100
params.page = page
var count
api(apiToCall, params).then(json => {
const listDomains = json.listdomainsresponse.domain
count = json.listdomainsresponse.count
this.domains = this.domains.concat(listDomains)
}).finally(() => {
this.domainLoading = false
if (count <= this.domains.length) {
sema.loading = false
} else {
this.loadMore(apiToCall, page + 1, sema)
}
this.form.domainid = 0
this.handleDomainChange(this.domains[0])
})