VMware Datastore Cluster primary storage pool synchronisation (#4871)

Support for using a datastore cluster as primary storage already exists. However, changes made at vCenter to the datastore cluster, such as the addition or removal of a datastore, are not synchronised with CloudStack directly; picking them up requires removing the primary storage from CloudStack and adding it again.

Here synchronisation of datastore cluster is fixed without need to remove or add the datastore cluster.
1. A new API, syncStoragePool, is introduced which takes the datastore cluster storage pool UUID as its parameter. This API checks whether there are any changes in the datastore cluster and updates the management server accordingly.
2. During synchronisation if a new child datastore is found in datastore cluster, then management server will create a new child storage pool in database under the datastore cluster. If the new child storage pool is already added as an individual storage pool then the existing storage pool entry will be converted to child storage pool (instead of creating a new storage pool entry)
3. During synchronisation, if an existing child datastore in CloudStack is found to have been removed on vCenter, then the management server removes that child datastore from the datastore cluster and makes it an individual storage pool.
The above behaviour is on par with the vCenter behaviour when adding and removing child datastore.
This commit is contained in:
Harikrishna 2021-05-07 16:30:54 +05:30 committed by GitHub
parent 1eea9c5939
commit 32e3bbdcc5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
24 changed files with 658 additions and 57 deletions

View File

@ -374,6 +374,7 @@ public class EventTypes {
// Primary storage pool
public static final String EVENT_ENABLE_PRIMARY_STORAGE = "ENABLE.PS";
public static final String EVENT_DISABLE_PRIMARY_STORAGE = "DISABLE.PS";
public static final String EVENT_SYNC_STORAGE_POOL = "SYNC.STORAGE.POOL";
// VPN
public static final String EVENT_REMOTE_ACCESS_VPN_CREATE = "VPN.REMOTE.ACCESS.CREATE";

View File

@ -27,6 +27,7 @@ import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.SyncStoragePoolCmd;
import com.cloud.exception.DiscoveryException;
import com.cloud.exception.InsufficientCapacityException;
@ -104,4 +105,6 @@ public interface StorageService {
ImageStore updateImageStoreStatus(Long id, Boolean readonly);
StoragePool syncStoragePool(SyncStoragePoolCmd cmd);
}

View File

@ -0,0 +1,97 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.admin.storage;

import java.util.logging.Logger;

import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.BaseAsyncCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.StoragePoolResponse;
import org.apache.cloudstack.context.CallContext;

import com.cloud.event.EventTypes;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.NetworkRuleConflictException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.storage.StoragePool;

/**
 * Admin-only async API command that re-synchronises a primary storage pool
 * with the management server. Currently supported for VMware datastore
 * clusters: child datastores added at vCenter are registered and child
 * datastores removed at vCenter are detached (see StorageService#syncStoragePool).
 */
@APICommand(name = SyncStoragePoolCmd.APINAME,
        description = "Sync storage pool with management server (currently supported for Datastore Cluster in VMware and syncs the datastores in it)",
        responseObject = StoragePoolResponse.class,
        requestHasSensitiveInfo = false,
        responseHasSensitiveInfo = false,
        since = "4.15.1",
        authorized = {RoleType.Admin})
public class SyncStoragePoolCmd extends BaseAsyncCmd {

    public static final String APINAME = "syncStoragePool";
    public static final Logger LOGGER = Logger.getLogger(SyncStoragePoolCmd.class.getName());

    /////////////////////////////////////////////////////
    //////////////// API parameters /////////////////////
    /////////////////////////////////////////////////////

    // Internal id of the (datastore cluster) storage pool to synchronise,
    // resolved from the UUID supplied by the caller.
    @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = StoragePoolResponse.class, required = true, description = "Storage pool id")
    private Long poolId;

    /////////////////////////////////////////////////////
    /////////////////// Accessors ///////////////////////
    /////////////////////////////////////////////////////

    public Long getPoolId() {
        return poolId;
    }

    @Override
    public String getEventType() {
        return EventTypes.EVENT_SYNC_STORAGE_POOL;
    }

    @Override
    public String getEventDescription() {
        return "Attempting to synchronise storage pool with management server";
    }

    @Override
    public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException,
            ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException {
        StoragePool result = _storageService.syncStoragePool(this);
        if (result != null) {
            StoragePoolResponse response = _responseGenerator.createStoragePoolResponse(result);
            // Use the command's canonical response name rather than the
            // hard-coded literal "storagepool", so the response tag matches
            // getCommandName() like other storage-pool commands.
            response.setResponseName(getCommandName());
            this.setResponseObject(response);
        } else {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to synchronise storage pool");
        }
    }

    @Override
    public String getCommandName() {
        return APINAME.toLowerCase() + BaseAsyncCmd.RESPONSE_SUFFIX;
    }

    @Override
    public long getEntityOwnerId() {
        return CallContext.current().getCallingAccountId();
    }
}

View File

@ -30,6 +30,7 @@ import org.apache.cloudstack.storage.command.ForgetObjectCmd;
import org.apache.cloudstack.storage.command.IntroduceObjectCmd;
import org.apache.cloudstack.storage.command.ResignatureCommand;
import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand;
import org.apache.cloudstack.storage.command.SyncVolumePathCommand;
import com.cloud.agent.api.Answer;
@ -81,5 +82,7 @@ public interface StorageProcessor {
Answer copyVolumeFromPrimaryToPrimary(CopyCommand cmd);
public Answer CheckDataStoreStoragePolicyComplaince(CheckDataStoreStoragePolicyComplainceCommand cmd);
public Answer checkDataStoreStoragePolicyCompliance(CheckDataStoreStoragePolicyComplainceCommand cmd);
public Answer syncVolumePath(SyncVolumePathCommand cmd);
}

View File

@ -34,6 +34,7 @@ import org.apache.cloudstack.storage.command.IntroduceObjectCmd;
import org.apache.cloudstack.storage.command.ResignatureCommand;
import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand;
import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
import org.apache.cloudstack.storage.command.SyncVolumePathCommand;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.Command;
@ -73,7 +74,9 @@ public class StorageSubsystemCommandHandlerBase implements StorageSubsystemComma
} else if (command instanceof DirectDownloadCommand) {
return processor.handleDownloadTemplateToPrimaryStorage((DirectDownloadCommand) command);
} else if (command instanceof CheckDataStoreStoragePolicyComplainceCommand) {
return processor.CheckDataStoreStoragePolicyComplaince((CheckDataStoreStoragePolicyComplainceCommand) command);
return processor.checkDataStoreStoragePolicyCompliance((CheckDataStoreStoragePolicyComplainceCommand) command);
} else if (command instanceof SyncVolumePathCommand) {
return processor.syncVolumePath((SyncVolumePathCommand) command);
}
return new Answer((Command)command, false, "not implemented yet");

View File

@ -0,0 +1,49 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package org.apache.cloudstack.storage.command;

import com.cloud.agent.api.Answer;
import com.cloud.agent.api.to.DiskTO;

/**
 * Answer for {@code SyncVolumePathCommand}. On success it carries the
 * (possibly updated) disk; on failure it carries only the error message.
 */
public class SyncVolumePathAnswer extends Answer {

    // Disk whose path/datastore information was synchronised; null on failure.
    private DiskTO disk;

    public SyncVolumePathAnswer() {
        super(null);
    }

    /**
     * Success answer carrying the synchronised disk.
     */
    public SyncVolumePathAnswer(DiskTO disk) {
        super(null);
        // Assign the field directly instead of invoking the overridable
        // setDisk() from the constructor (avoids calling an overridable
        // method on a partially constructed object).
        this.disk = disk;
    }

    /**
     * Failure answer with the reason the sync could not be performed.
     */
    public SyncVolumePathAnswer(String errMsg) {
        super(null, false, errMsg);
    }

    public DiskTO getDisk() {
        return disk;
    }

    public void setDisk(DiskTO disk) {
        this.disk = disk;
    }
}

View File

@ -0,0 +1,49 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package org.apache.cloudstack.storage.command;

import com.cloud.agent.api.to.DiskTO;

/**
 * Storage subsystem command asking the hypervisor resource to synchronise a
 * single volume's path/datastore information with the management server.
 */
public class SyncVolumePathCommand extends StorageSubSystemCommand {

    // The volume (as a disk transfer object) whose path is to be synchronised.
    private DiskTO volumeDisk;

    public SyncVolumePathCommand(final DiskTO disk) {
        this.volumeDisk = disk;
    }

    public DiskTO getDisk() {
        return volumeDisk;
    }

    public void setDisk(final DiskTO disk) {
        volumeDisk = disk;
    }

    /** Always false: this command does not need to be serialised with other storage commands. */
    @Override
    public boolean executeInSequence() {
        return false;
    }

    /** Intentional no-op — see {@link #executeInSequence()}. */
    @Override
    public void setExecuteInSequence(boolean inSeq) {
    }
}

View File

@ -19,6 +19,7 @@ package com.cloud.storage;
import java.math.BigDecimal;
import java.util.List;
import com.cloud.agent.api.ModifyStoragePoolAnswer;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
import org.apache.cloudstack.framework.config.ConfigKey;
@ -240,4 +241,6 @@ public interface StorageManager extends StorageService {
boolean isStoragePoolDatastoreClusterParent(StoragePool pool);
void syncDatastoreClusterStoragePool(long datastoreClusterPoolId, List<ModifyStoragePoolAnswer> childDatastoreAnswerList, long hostId);
}

View File

@ -32,6 +32,8 @@ public interface StoragePoolHostDao extends GenericDao<StoragePoolHostVO, Long>
List<StoragePoolHostVO> listByHostStatus(long poolId, Status hostStatus);
List<Long> findHostsConnectedToPools(List<Long> poolIds);
List<Pair<Long, Integer>> getDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly);
public void deletePrimaryRecordsForHost(long hostId);

View File

@ -21,7 +21,7 @@ import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
@ -44,6 +44,8 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase<StoragePoolHostVO, Lo
protected static final String HOST_FOR_POOL_SEARCH = "SELECT * FROM storage_pool_host_ref ph, host h where ph.host_id = h.id and ph.pool_id=? and h.status=? ";
protected static final String HOSTS_FOR_POOLS_SEARCH = "SELECT DISTINCT(ph.host_id) FROM storage_pool_host_ref ph, host h WHERE ph.host_id = h.id AND h.status = 'Up' AND resource_state = 'Enabled' AND ph.pool_id IN (?)";
protected static final String STORAGE_POOL_HOST_INFO = "SELECT p.data_center_id, count(ph.host_id) " + " FROM storage_pool p, storage_pool_host_ref ph "
+ " WHERE p.id = ph.pool_id AND p.data_center_id = ? " + " GROUP by p.data_center_id";
@ -121,6 +123,33 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase<StoragePoolHostVO, Lo
return result;
}
@Override
public List<Long> findHostsConnectedToPools(List<Long> poolIds) {
    // Collects the distinct ids of Up/Enabled hosts that have a
    // storage_pool_host_ref entry for any of the given pools.
    List<Long> hosts = new ArrayList<>();
    if (poolIds == null || poolIds.isEmpty()) {
        return hosts;
    }
    // Pool ids are numeric values we supply ourselves (no user-controlled
    // text), so inlining them into the IN clause is injection-safe.
    String poolIdsInStr = poolIds.stream().map(String::valueOf).collect(Collectors.joining(",", "(", ")"));
    String sql = HOSTS_FOR_POOLS_SEARCH.replace("(?)", poolIdsInStr);
    TransactionLegacy txn = TransactionLegacy.currentTxn();
    // One try-with-resources for both statement and result set replaces the
    // original nested form, which duplicated the same warn-and-continue
    // handler at two levels.
    try (PreparedStatement pstmt = txn.prepareStatement(sql);
         ResultSet rs = pstmt.executeQuery()) {
        while (rs.next()) {
            hosts.add(rs.getLong(1)); // host_id column
        }
    } catch (Exception e) {
        // Best effort: log and return whatever was collected so far.
        s_logger.warn(String.format("Unable to retrieve hosts from pools [%s] due to [%s].", poolIdsInStr, e.getMessage()));
    }
    return hosts;
}
@Override
public List<Pair<Long, Integer>> getDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly) {
ArrayList<Pair<Long, Integer>> l = new ArrayList<Pair<Long, Integer>>();

View File

@ -130,4 +130,6 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
Integer countAll();
List<StoragePoolVO> findPoolsByStorageType(String storageType);
}

View File

@ -93,6 +93,7 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
AllFieldSearch.and("podId", AllFieldSearch.entity().getPodId(), Op.EQ);
AllFieldSearch.and("clusterId", AllFieldSearch.entity().getClusterId(), Op.EQ);
AllFieldSearch.and("storage_provider_name", AllFieldSearch.entity().getStorageProviderName(), Op.EQ);
AllFieldSearch.and("poolType", AllFieldSearch.entity().getPoolType(), Op.EQ);
AllFieldSearch.done();
DcPodSearch = createSearchBuilder();
@ -581,4 +582,11 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
sc.setParameters("status", StoragePoolStatus.Up);
return listBy(sc);
}
@Override
public List<StoragePoolVO> findPoolsByStorageType(String storageType) {
    // Look up every pool whose pool_type column matches the given value.
    final SearchCriteria<StoragePoolVO> criteria = AllFieldSearch.create();
    criteria.setParameters("poolType", storageType);
    return listBy(criteria);
}
}

View File

@ -22,16 +22,14 @@ import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.ModifyStoragePoolAnswer;
import com.cloud.agent.api.ModifyStoragePoolCommand;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.alert.AlertManager;
import com.cloud.exception.StorageConflictException;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.Storage;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.StoragePoolTagsDao;
import com.cloud.storage.StorageManager;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
@ -43,9 +41,7 @@ import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import javax.inject.Inject;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class DefaultHostListener implements HypervisorHostListener {
private static final Logger s_logger = Logger.getLogger(DefaultHostListener.class);
@ -62,7 +58,7 @@ public class DefaultHostListener implements HypervisorHostListener {
@Inject
StoragePoolDetailsDao storagePoolDetailsDao;
@Inject
StoragePoolTagsDao storagePoolTagsDao;
StorageManager storageManager;
@Override
public boolean hostAdded(long hostId) {
@ -104,43 +100,7 @@ public class DefaultHostListener implements HypervisorHostListener {
updateStoragePoolHostVOAndDetails(poolVO, hostId, mspAnswer);
if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster) {
for (ModifyStoragePoolAnswer childDataStoreAnswer : ((ModifyStoragePoolAnswer) answer).getDatastoreClusterChildren()) {
StoragePoolInfo childStoragePoolInfo = childDataStoreAnswer.getPoolInfo();
StoragePoolVO dataStoreVO = primaryStoreDao.findPoolByUUID(childStoragePoolInfo.getUuid());
if (dataStoreVO != null) {
continue;
}
dataStoreVO = new StoragePoolVO();
dataStoreVO.setStorageProviderName(poolVO.getStorageProviderName());
dataStoreVO.setHostAddress(childStoragePoolInfo.getHost());
dataStoreVO.setPoolType(Storage.StoragePoolType.PreSetup);
dataStoreVO.setPath(childStoragePoolInfo.getHostPath());
dataStoreVO.setPort(poolVO.getPort());
dataStoreVO.setName(childStoragePoolInfo.getName());
dataStoreVO.setUuid(childStoragePoolInfo.getUuid());
dataStoreVO.setDataCenterId(poolVO.getDataCenterId());
dataStoreVO.setPodId(poolVO.getPodId());
dataStoreVO.setClusterId(poolVO.getClusterId());
dataStoreVO.setStatus(StoragePoolStatus.Up);
dataStoreVO.setUserInfo(poolVO.getUserInfo());
dataStoreVO.setManaged(poolVO.isManaged());
dataStoreVO.setCapacityIops(poolVO.getCapacityIops());
dataStoreVO.setCapacityBytes(childDataStoreAnswer.getPoolInfo().getCapacityBytes());
dataStoreVO.setUsedBytes(childDataStoreAnswer.getPoolInfo().getCapacityBytes() - childDataStoreAnswer.getPoolInfo().getAvailableBytes());
dataStoreVO.setHypervisor(poolVO.getHypervisor());
dataStoreVO.setScope(poolVO.getScope());
dataStoreVO.setParent(poolVO.getId());
Map<String, String> details = new HashMap<>();
if(StringUtils.isNotEmpty(childDataStoreAnswer.getPoolType())) {
details.put("pool_type", childDataStoreAnswer.getPoolType());
}
List<String> storageTags = storagePoolTagsDao.getStoragePoolTags(poolId);
primaryStoreDao.persist(dataStoreVO, details, storageTags);
updateStoragePoolHostVOAndDetails(dataStoreVO, hostId, childDataStoreAnswer);
}
storageManager.syncDatastoreClusterStoragePool(poolId, ((ModifyStoragePoolAnswer) answer).getDatastoreClusterChildren(), hostId);
}
s_logger.info("Connection established between storage pool " + pool + " and host " + hostId);

View File

@ -60,6 +60,7 @@ import org.apache.cloudstack.storage.command.ResignatureAnswer;
import org.apache.cloudstack.storage.command.ResignatureCommand;
import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer;
import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand;
import org.apache.cloudstack.storage.command.SyncVolumePathCommand;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
@ -1838,8 +1839,14 @@ public class KVMStorageProcessor implements StorageProcessor {
}
@Override
public Answer CheckDataStoreStoragePolicyComplaince(CheckDataStoreStoragePolicyComplainceCommand cmd) {
public Answer checkDataStoreStoragePolicyCompliance(CheckDataStoreStoragePolicyComplainceCommand cmd) {
s_logger.info("'CheckDataStoreStoragePolicyComplainceCommand' not currently applicable for KVMStorageProcessor");
return new Answer(cmd,false,"Not currently applicable for KVMStorageProcessor");
}
@Override
public Answer syncVolumePath(SyncVolumePathCommand cmd) {
    // Volume path synchronisation currently targets VMware datastore clusters
    // only; on KVM this is a deliberate unsupported-operation stub.
    s_logger.info("SyncVolumePathCommand not currently applicable for KVMStorageProcessor");
    return new Answer(cmd, false, "Not currently applicable for KVMStorageProcessor");
}
}

View File

@ -35,8 +35,9 @@ import org.apache.cloudstack.storage.command.ForgetObjectCmd;
import org.apache.cloudstack.storage.command.IntroduceObjectCmd;
import org.apache.cloudstack.storage.command.ResignatureAnswer;
import org.apache.cloudstack.storage.command.ResignatureCommand;
import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer;
import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand;
import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer;
import org.apache.cloudstack.storage.command.SyncVolumePathCommand;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
@ -828,11 +829,17 @@ public class Ovm3StorageProcessor implements StorageProcessor {
}
@Override
public Answer CheckDataStoreStoragePolicyComplaince(CheckDataStoreStoragePolicyComplainceCommand cmd) {
public Answer checkDataStoreStoragePolicyCompliance(CheckDataStoreStoragePolicyComplainceCommand cmd) {
LOGGER.info("'CheckDataStoreStoragePolicyComplainceCommand' not applicable used for Ovm3StorageProcessor");
return new Answer(cmd,false,"Not applicable used for Ovm3StorageProcessor");
}
@Override
public Answer syncVolumePath(SyncVolumePathCommand cmd) {
    // Volume path synchronisation currently targets VMware datastore clusters
    // only; on OVM3 this is a deliberate unsupported-operation stub.
    LOGGER.info("SyncVolumePathCommand not currently applicable for Ovm3StorageProcessor");
    return new Answer(cmd, false, "Not currently applicable for Ovm3StorageProcessor");
}
@Override
public Answer copyVolumeFromPrimaryToPrimary(CopyCommand cmd) {
return null;

View File

@ -41,6 +41,7 @@ import org.apache.cloudstack.storage.command.ResignatureAnswer;
import org.apache.cloudstack.storage.command.ResignatureCommand;
import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer;
import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand;
import org.apache.cloudstack.storage.command.SyncVolumePathCommand;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
@ -272,7 +273,12 @@ public class SimulatorStorageProcessor implements StorageProcessor {
}
@Override
public Answer CheckDataStoreStoragePolicyComplaince(CheckDataStoreStoragePolicyComplainceCommand cmd) {
public Answer checkDataStoreStoragePolicyCompliance(CheckDataStoreStoragePolicyComplainceCommand cmd) {
return new Answer(cmd, true, null);
}
@Override
public Answer syncVolumePath(SyncVolumePathCommand cmd) {
    // Simulator: report success without performing any work.
    return new Answer(cmd, true, null);
}
}

View File

@ -5272,10 +5272,12 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
String datacenterName = datastoreClusterPath.substring(0, pathstartPosition+1);
String childPath = datacenterName + summary.getName();
poolInfo.setHostPath(childPath);
String uuid = UUID.nameUUIDFromBytes(((pool.getHost() + childPath)).getBytes()).toString();
String uuid = childDsMo.getCustomFieldValue(CustomFieldConstants.CLOUD_UUID);
if (uuid == null) {
uuid = UUID.nameUUIDFromBytes(((pool.getHost() + childPath)).getBytes()).toString();
}
poolInfo.setUuid(uuid);
poolInfo.setLocalPath(cmd.LOCAL_PATH_PREFIX + File.separator + uuid);
answer.setPoolInfo(poolInfo);
answer.setPoolType(summary.getType());
answer.setLocalDatastoreName(morDatastore.getValue());

View File

@ -50,6 +50,8 @@ import org.apache.cloudstack.storage.command.ResignatureAnswer;
import org.apache.cloudstack.storage.command.ResignatureCommand;
import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer;
import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand;
import org.apache.cloudstack.storage.command.SyncVolumePathCommand;
import org.apache.cloudstack.storage.command.SyncVolumePathAnswer;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
@ -3892,7 +3894,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
}
@Override
public Answer CheckDataStoreStoragePolicyComplaince(CheckDataStoreStoragePolicyComplainceCommand cmd) {
public Answer checkDataStoreStoragePolicyCompliance(CheckDataStoreStoragePolicyComplainceCommand cmd) {
String primaryStorageNameLabel = cmd.getStoragePool().getUuid();
String storagePolicyId = cmd.getStoragePolicyId();
VmwareContext context = hostService.getServiceContext(cmd);
@ -3962,4 +3964,81 @@ public class VmwareStorageProcessor implements StorageProcessor {
throw new CloudRuntimeException(msg, e);
}
}
@Override
public Answer syncVolumePath(SyncVolumePathCommand cmd) {
    // Inspects the live VM configuration in vCenter and reports, via answer
    // context params, whether the volume's disk-chain top file and/or backing
    // datastore differ from what the management server has recorded (e.g.
    // after a Storage DRS move inside a datastore cluster).
    DiskTO disk = cmd.getDisk();
    VolumeObjectTO volumeTO = (VolumeObjectTO)disk.getData();
    DataStoreTO primaryStore = volumeTO.getDataStore();
    String volumePath = volumeTO.getPath();
    String vmName = volumeTO.getVmName();

    boolean datastoreChangeObserved = false;
    boolean volumePathChangeObserved = false;
    String chainInfo = null;
    try {
        VmwareContext context = hostService.getServiceContext(null);
        VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, null);
        VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmName);
        if (vmMo == null) {
            // The VM may be registered on another host of the cluster.
            vmMo = hyperHost.findVmOnPeerHyperHost(vmName);
            if (vmMo == null) {
                String msg = "Unable to find the VM to execute SyncVolumePathCommand, vmName: " + vmName;
                s_logger.error(msg);
                throw new Exception(msg);
            }
        }

        String datastoreUUID = primaryStore.getUuid();
        if (disk.getDetails().get(DiskTO.PROTOCOL_TYPE) != null && disk.getDetails().get(DiskTO.PROTOCOL_TYPE).equalsIgnoreCase("DatastoreCluster")) {
            VirtualMachineDiskInfo matchingExistingDisk = getMatchingExistingDisk(hyperHost, context, vmMo, disk);
            VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder();
            if (diskInfoBuilder != null && matchingExistingDisk != null) {
                String[] diskChain = matchingExistingDisk.getDiskChain();
                assert (diskChain.length > 0);
                DatastoreFile file = new DatastoreFile(diskChain[0]);
                if (!file.getFileBaseName().equalsIgnoreCase(volumePath)) {
                    // The disk's top file was renamed/relocated; adopt the new name.
                    if (s_logger.isInfoEnabled())
                        s_logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumePath + " -> " + file.getFileBaseName());
                    volumePathChangeObserved = true;
                    volumePath = file.getFileBaseName();
                    volumeTO.setPath(volumePath);
                    chainInfo = _gson.toJson(matchingExistingDisk);
                }

                DatastoreMO diskDatastoreMofromVM = getDiskDatastoreMofromVM(hyperHost, context, vmMo, disk, diskInfoBuilder);
                if (diskDatastoreMofromVM != null) {
                    String actualPoolUuid = diskDatastoreMofromVM.getCustomFieldValue(CustomFieldConstants.CLOUD_UUID);
                    // The CLOUD_UUID custom field can be unset on a datastore
                    // CloudStack has not tagged yet (the same lookup is
                    // null-checked elsewhere in this commit), so guard against
                    // null before comparing to avoid an NPE.
                    if (actualPoolUuid != null && !actualPoolUuid.equalsIgnoreCase(primaryStore.getUuid())) {
                        s_logger.warn(String.format("Volume %s found to be in a different storage pool %s", volumePath, actualPoolUuid));
                        datastoreChangeObserved = true;
                        datastoreUUID = actualPoolUuid;
                        chainInfo = _gson.toJson(matchingExistingDisk);
                    }
                }
            }
        }

        SyncVolumePathAnswer answer = new SyncVolumePathAnswer(disk);
        if (datastoreChangeObserved) {
            answer.setContextParam("datastoreName", datastoreUUID);
        }
        if (volumePathChangeObserved) {
            answer.setContextParam("volumePath", volumePath);
        }
        if (chainInfo != null && !chainInfo.isEmpty()) {
            answer.setContextParam("chainInfo", chainInfo);
        }
        return answer;
    } catch (Throwable e) {
        // Broad catch matches the surrounding processor's error-handling style:
        // any failure is converted into an error answer for the caller.
        if (e instanceof RemoteException) {
            s_logger.warn("Encounter remote exception to vCenter, invalidate VMware session context");
            hostService.invalidateServiceContext(null);
        }
        return new SyncVolumePathAnswer("Failed to process SyncVolumePathCommand due to " + e.getMessage());
    }
}
}

View File

@ -50,6 +50,7 @@ import org.apache.cloudstack.storage.command.ResignatureAnswer;
import org.apache.cloudstack.storage.command.ResignatureCommand;
import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer;
import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand;
import org.apache.cloudstack.storage.command.SyncVolumePathCommand;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
@ -217,11 +218,17 @@ public class XenServerStorageProcessor implements StorageProcessor {
}
@Override
public Answer CheckDataStoreStoragePolicyComplaince(CheckDataStoreStoragePolicyComplainceCommand cmd) {
public Answer checkDataStoreStoragePolicyCompliance(CheckDataStoreStoragePolicyComplainceCommand cmd) {
s_logger.info("'CheckDataStoreStoragePolicyComplainceCommand' not applicable used for XenServerStorageProcessor");
return new Answer(cmd,false,"Not applicable used for XenServerStorageProcessor");
}
@Override
public Answer syncVolumePath(SyncVolumePathCommand cmd) {
    // Volume path synchronisation currently targets VMware datastore clusters
    // only; on XenServer this is a deliberate unsupported-operation stub.
    s_logger.info("SyncVolumePathCommand not currently applicable for XenServerStorageProcessor");
    return new Answer(cmd, false, "Not currently applicable for XenServerStorageProcessor");
}
@Override
public AttachAnswer attachIso(final AttachCommand cmd) {
final DiskTO disk = cmd.getDisk();

View File

@ -30,6 +30,7 @@ import java.util.UUID;
import org.apache.cloudstack.storage.command.CheckDataStoreStoragePolicyComplainceCommand;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.command.CopyCommand;
import org.apache.cloudstack.storage.command.SyncVolumePathCommand;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
@ -915,11 +916,17 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
}
@Override
public Answer CheckDataStoreStoragePolicyComplaince(CheckDataStoreStoragePolicyComplainceCommand cmd) {
public Answer checkDataStoreStoragePolicyCompliance(CheckDataStoreStoragePolicyComplainceCommand cmd) {
s_logger.info("'CheckDataStoreStoragePolicyComplainceCommand' not applicable used for XenServerStorageProcessor");
return new Answer(cmd,false,"Not applicable used for XenServerStorageProcessor");
}
@Override
public Answer syncVolumePath(SyncVolumePathCommand cmd) {
    // Volume path synchronisation currently targets VMware datastore clusters
    // only; this XenServer 6.25 processor keeps the same unsupported stub.
    s_logger.info("SyncVolumePathCommand not currently applicable for XenServerStorageProcessor");
    return new Answer(cmd, false, "Not currently applicable for XenServerStorageProcessor");
}
@Override
public Answer copyVolumeFromPrimaryToSecondary(final CopyCommand cmd) {
final Connection conn = hypervisorResource.getConnection();

View File

@ -204,6 +204,7 @@ import org.apache.cloudstack.api.command.admin.storage.PreparePrimaryStorageForM
import org.apache.cloudstack.api.command.admin.storage.UpdateCloudToUseObjectStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.UpdateImageStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.SyncStoragePoolCmd;
import org.apache.cloudstack.api.command.admin.swift.AddSwiftCmd;
import org.apache.cloudstack.api.command.admin.swift.ListSwiftsCmd;
import org.apache.cloudstack.api.command.admin.systemvm.DestroySystemVmCmd;
@ -3015,6 +3016,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
cmdList.add(FindStoragePoolsForMigrationCmd.class);
cmdList.add(PreparePrimaryStorageForMaintenanceCmd.class);
cmdList.add(UpdateStoragePoolCmd.class);
cmdList.add(SyncStoragePoolCmd.class);
cmdList.add(UpdateImageStoreCmd.class);
cmdList.add(DestroySystemVmCmd.class);
cmdList.add(ListSystemVMsCmd.class);

View File

@ -32,6 +32,8 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.LinkedHashSet;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
@ -52,6 +54,7 @@ import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.SyncStoragePoolCmd;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
@ -90,6 +93,8 @@ import org.apache.cloudstack.management.ManagementServerHost;
import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao;
import org.apache.cloudstack.storage.command.CheckDataStoreStoragePolicyComplainceCommand;
import org.apache.cloudstack.storage.command.DettachCommand;
import org.apache.cloudstack.storage.command.SyncVolumePathAnswer;
import org.apache.cloudstack.storage.command.SyncVolumePathCommand;
import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
import org.apache.cloudstack.storage.datastore.db.ImageStoreDetailsDao;
import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
@ -104,6 +109,7 @@ import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.commons.collections.CollectionUtils;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
@ -114,6 +120,8 @@ import com.cloud.agent.api.DeleteStoragePoolCommand;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.agent.api.ModifyStoragePoolCommand;
import com.cloud.agent.api.ModifyStoragePoolAnswer;
import com.cloud.agent.manager.Commands;
import com.cloud.api.ApiDBUtils;
import com.cloud.api.query.dao.TemplateJoinDao;
@ -1552,14 +1560,14 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
if (primaryStorage.getStatus() == StoragePoolStatus.PrepareForMaintenance) {
throw new CloudRuntimeException(String.format("There is already a job running for preparation for maintenance of the storage pool %s", primaryStorage.getUuid()));
}
handlePrepareDatastoreCluserMaintenance(lifeCycle, primaryStorageId);
handlePrepareDatastoreClusterMaintenance(lifeCycle, primaryStorageId);
}
lifeCycle.maintain(store);
return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(primaryStorage.getId(), DataStoreRole.Primary);
}
private void handlePrepareDatastoreCluserMaintenance(DataStoreLifeCycle lifeCycle, Long primaryStorageId) {
private void handlePrepareDatastoreClusterMaintenance(DataStoreLifeCycle lifeCycle, Long primaryStorageId) {
StoragePoolVO datastoreCluster = _storagePoolDao.findById(primaryStorageId);
datastoreCluster.setStatus(StoragePoolStatus.PrepareForMaintenance);
_storagePoolDao.update(datastoreCluster.getId(), datastoreCluster);
@ -1633,6 +1641,261 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(primaryStorage.getId(), DataStoreRole.Primary);
}
@Override
@ActionEvent(eventType = EventTypes.EVENT_SYNC_STORAGE_POOL, eventDescription = "synchronising storage pool with management server", async = true)
public StoragePool syncStoragePool(SyncStoragePoolCmd cmd) {
    // Synchronises a VMware datastore-cluster primary storage with vCenter:
    // re-runs ModifyStoragePoolCommand on a connected host, reconciles child
    // datastores in the DB, and refreshes capacity figures on every host.
    Long poolId = cmd.getPoolId();
    StoragePoolVO pool = _storagePoolDao.findById(poolId);
    if (pool == null) {
        // BUGFIX: the original message called pool.getUuid() here, which NPE'd on the
        // very null it was reporting. Use the caller-supplied id instead.
        String msg = String.format("Unable to find storage pool with id [%s] while syncing storage pool with management server", poolId);
        s_logger.error(msg);
        throw new InvalidParameterValueException(msg);
    }
    // Only datastore-cluster pools have child datastores to reconcile.
    if (!pool.getPoolType().equals(StoragePoolType.DatastoreCluster)) {
        throw new InvalidParameterValueException("SyncStoragePool API is currently supported only for storage type of datastore cluster");
    }
    if (!pool.getStatus().equals(StoragePoolStatus.Up)) {
        throw new InvalidParameterValueException(String.format("Primary storage with id %s is not ready for syncing, as the status is %s", pool.getUuid(), pool.getStatus().toString()));
    }
    // Any host connected to this pool can answer the command; use the first,
    // but refresh per-host stats on all of them afterwards.
    List<Long> poolIds = new ArrayList<>();
    poolIds.add(poolId);
    List<Long> hosts = _storagePoolHostDao.findHostsConnectedToPools(poolIds);
    if (hosts.isEmpty()) {
        throw new CloudRuntimeException(String.format("Unable to sync storage pool [%s] as there no connected hosts to the storage pool", pool.getUuid()));
    }
    Long hostId = hosts.get(0);
    ModifyStoragePoolCommand modifyStoragePoolCommand = new ModifyStoragePoolCommand(true, pool);
    final Answer answer = _agentMgr.easySend(hostId, modifyStoragePoolCommand);
    if (answer == null) {
        throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command %s", pool.getUuid()));
    }
    if (!answer.getResult()) {
        // BUGFIX: original format string had only two %s for three arguments, so
        // answer.getDetails() was silently ignored (Java's Formatter drops extra
        // arguments) and the message ended with a dangling "due to ".
        throw new CloudRuntimeException(String.format("Unable to process ModifyStoragePoolCommand for pool %s on the host %s due to %s", pool.getUuid(), hostId, answer.getDetails()));
    }
    assert (answer instanceof ModifyStoragePoolAnswer) : "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? Pool=" +
            pool.getId() + "Host=" + hostId;
    ModifyStoragePoolAnswer mspAnswer = (ModifyStoragePoolAnswer) answer;

    // Re-read the pool row and fold the fresh capacity numbers in, then validate and
    // reconcile child datastores against what vCenter reported.
    StoragePoolVO poolVO = _storagePoolDao.findById(poolId);
    updateStoragePoolHostVOAndBytes(poolVO, hostId, mspAnswer);
    validateChildDatastoresToBeAddedInUpState(poolVO, mspAnswer.getDatastoreClusterChildren());
    syncDatastoreClusterStoragePool(poolId, mspAnswer.getDatastoreClusterChildren(), hostId);

    // Refresh per-host path/capacity records for every child datastore on every
    // connected host (syncDatastoreClusterStoragePool only covered hostId).
    for (ModifyStoragePoolAnswer childDataStoreAnswer : mspAnswer.getDatastoreClusterChildren()) {
        StoragePoolInfo childStoragePoolInfo = childDataStoreAnswer.getPoolInfo();
        StoragePoolVO dataStoreVO = _storagePoolDao.findPoolByUUID(childStoragePoolInfo.getUuid());
        for (Long host : hosts) {
            updateStoragePoolHostVOAndBytes(dataStoreVO, host, childDataStoreAnswer);
        }
    }
    return (PrimaryDataStoreInfo) _dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
}
/**
 * Reconciles the management server's view of a datastore cluster with the child
 * datastores vCenter reported in {@code childDatastoreAnswerList}:
 * <ul>
 *   <li>a child already known as a standalone pool is re-parented under the cluster
 *       (its tags are merged with the cluster's tags, de-duplicated);</li>
 *   <li>a child unknown to CloudStack gets a new child StoragePoolVO row;</li>
 *   <li>children known to CloudStack but absent from vCenter's answer are detached
 *       from the cluster via handleRemoveChildStoragePoolFromDatastoreCluster.</li>
 * </ul>
 *
 * @param datastoreClusterPoolId  id of the datastore-cluster parent pool
 * @param childDatastoreAnswerList one ModifyStoragePoolAnswer per child datastore as
 *                                 reported by the agent
 * @param hostId                  host whose per-host path/capacity records are updated
 */
public void syncDatastoreClusterStoragePool(long datastoreClusterPoolId, List<ModifyStoragePoolAnswer> childDatastoreAnswerList, long hostId) {
    StoragePoolVO datastoreClusterPool = _storagePoolDao.findById(datastoreClusterPoolId);
    List<String> storageTags = _storagePoolTagsDao.getStoragePoolTags(datastoreClusterPoolId);
    List<StoragePoolVO> childDatastores = _storagePoolDao.listChildStoragePoolsInDatastoreCluster(datastoreClusterPoolId);
    // Start with every child CloudStack currently knows; entries still present after
    // the loop are children vCenter no longer reports and must be detached.
    Set<String> childDatastoreUUIDs = new HashSet<>();
    for (StoragePoolVO childDatastore : childDatastores) {
        childDatastoreUUIDs.add(childDatastore.getUuid());
    }
    for (ModifyStoragePoolAnswer childDataStoreAnswer : childDatastoreAnswerList) {
        StoragePoolInfo childStoragePoolInfo = childDataStoreAnswer.getPoolInfo();
        StoragePoolVO dataStoreVO = _storagePoolDao.findPoolByUUID(childStoragePoolInfo.getUuid());
        if (dataStoreVO == null && childDataStoreAnswer.getPoolType().equalsIgnoreCase("NFS")) {
            // NFS datastores registered standalone carry a different UUID; vCenter's
            // datastore name is the CloudStack uuid with dashes stripped, so match on that.
            List<StoragePoolVO> nfsStoragePools = _storagePoolDao.findPoolsByStorageType(StoragePoolType.NetworkFilesystem.toString());
            for (StoragePoolVO storagePool : nfsStoragePools) {
                String storagePoolUUID = storagePool.getUuid();
                if (childStoragePoolInfo.getName().equalsIgnoreCase(storagePoolUUID.replaceAll("-", ""))) {
                    dataStoreVO = storagePool;
                    break;
                }
            }
        }
        if (dataStoreVO != null) {
            if (dataStoreVO.getParent() != datastoreClusterPoolId) {
                // Existing standalone pool found inside the cluster at vCenter:
                // convert it into a child pool rather than creating a duplicate row.
                s_logger.debug(String.format("Storage pool %s with uuid %s is found to be under datastore cluster %s at vCenter, " +
                        "so moving the storage pool to be a child storage pool under the datastore cluster in CloudStack management server",
                        childStoragePoolInfo.getName(), childStoragePoolInfo.getUuid(), datastoreClusterPool.getName()));
                dataStoreVO.setParent(datastoreClusterPoolId);
                _storagePoolDao.update(dataStoreVO.getId(), dataStoreVO);
                // Merge the cluster's tags with the pool's own tags, dropping duplicates
                // while preserving order (LinkedHashSet).
                if (CollectionUtils.isNotEmpty(storageTags)) {
                    storageTags.addAll(_storagePoolTagsDao.getStoragePoolTags(dataStoreVO.getId()));
                } else {
                    storageTags = _storagePoolTagsDao.getStoragePoolTags(dataStoreVO.getId());
                }
                if (CollectionUtils.isNotEmpty(storageTags)) {
                    Set<String> set = new LinkedHashSet<>(storageTags);
                    storageTags.clear();
                    storageTags.addAll(set);
                    if (s_logger.isDebugEnabled()) {
                        s_logger.debug("Updating Storage Pool Tags to :" + storageTags);
                    }
                    _storagePoolTagsDao.persist(dataStoreVO.getId(), storageTags);
                }
            } else {
                // This is to find datastores which are removed from datastore cluster.
                // The final set childDatastoreUUIDs contains the UUIDs of child datastores which needs to be removed from datastore cluster
                // NOTE(review): when dataStoreVO was matched via the NFS name lookup above,
                // its CloudStack uuid differs from childStoragePoolInfo.getUuid() — confirm
                // the remove() below targets the right key in that path.
                childDatastoreUUIDs.remove(childStoragePoolInfo.getUuid());
            }
        } else {
            // Brand-new child datastore: create its DB row under the cluster.
            dataStoreVO = createChildDatastoreVO(datastoreClusterPool, childDataStoreAnswer);
        }
        updateStoragePoolHostVOAndBytes(dataStoreVO, hostId, childDataStoreAnswer);
    }
    // Whatever is left was removed from the cluster at vCenter: detach those pools.
    handleRemoveChildStoragePoolFromDatastoreCluster(childDatastoreUUIDs);
}
/**
 * Pre-flight check before syncing a datastore cluster: every child datastore that is
 * already known to CloudStack must be in the Up state, otherwise the whole sync is
 * aborted with a CloudRuntimeException.
 *
 * @param datastoreClusterPool     the datastore-cluster parent pool (used in messages)
 * @param childDatastoreAnswerList per-child answers reported by the agent
 * @throws CloudRuntimeException if any known child pool is not in Up state
 */
private void validateChildDatastoresToBeAddedInUpState(StoragePoolVO datastoreClusterPool, List<ModifyStoragePoolAnswer> childDatastoreAnswerList) {
    for (ModifyStoragePoolAnswer childAnswer : childDatastoreAnswerList) {
        StoragePoolInfo childInfo = childAnswer.getPoolInfo();
        StoragePoolVO existingPool = _storagePoolDao.findPoolByUUID(childInfo.getUuid());
        if (existingPool == null && childAnswer.getPoolType().equalsIgnoreCase("NFS")) {
            // A standalone NFS pool carries a different uuid; vCenter names the
            // datastore after the CloudStack uuid with dashes removed — match on that.
            List<StoragePoolVO> nfsPools = _storagePoolDao.findPoolsByStorageType(StoragePoolType.NetworkFilesystem.toString());
            for (StoragePoolVO nfsPool : nfsPools) {
                if (childInfo.getName().equalsIgnoreCase(nfsPool.getUuid().replaceAll("-", ""))) {
                    existingPool = nfsPool;
                    break;
                }
            }
        }
        // Unknown children are fine (they will be created); known ones must be Up.
        if (existingPool == null || existingPool.getStatus().equals(StoragePoolStatus.Up)) {
            continue;
        }
        String msg = String.format("Cannot synchronise datastore cluster %s because primary storage with id %s is not ready for syncing, " +
                "as the status is %s", datastoreClusterPool.getUuid(), existingPool.getUuid(), existingPool.getStatus().toString());
        throw new CloudRuntimeException(msg);
    }
}
/**
 * Creates and persists the DB row for a newly discovered child datastore of a
 * datastore cluster. Identity and capacity come from the agent's answer; placement,
 * provider, scope and tags are inherited from the parent cluster pool.
 *
 * @param datastoreClusterPool the parent datastore-cluster pool
 * @param childDataStoreAnswer the agent's answer describing the child datastore
 * @return the persisted child StoragePoolVO
 */
private StoragePoolVO createChildDatastoreVO(StoragePoolVO datastoreClusterPool, ModifyStoragePoolAnswer childDataStoreAnswer) {
    StoragePoolInfo poolInfo = childDataStoreAnswer.getPoolInfo();
    StoragePoolVO childPool = new StoragePoolVO();
    // Identity of the child datastore as reported by the hypervisor.
    childPool.setName(poolInfo.getName());
    childPool.setUuid(poolInfo.getUuid());
    childPool.setHostAddress(poolInfo.getHost());
    childPool.setPath(poolInfo.getHostPath());
    childPool.setPoolType(Storage.StoragePoolType.PreSetup);
    // Placement, provider and scope are inherited from the parent cluster.
    childPool.setStorageProviderName(datastoreClusterPool.getStorageProviderName());
    childPool.setPort(datastoreClusterPool.getPort());
    childPool.setDataCenterId(datastoreClusterPool.getDataCenterId());
    childPool.setPodId(datastoreClusterPool.getPodId());
    childPool.setClusterId(datastoreClusterPool.getClusterId());
    childPool.setUserInfo(datastoreClusterPool.getUserInfo());
    childPool.setManaged(datastoreClusterPool.isManaged());
    childPool.setCapacityIops(datastoreClusterPool.getCapacityIops());
    childPool.setHypervisor(datastoreClusterPool.getHypervisor());
    childPool.setScope(datastoreClusterPool.getScope());
    childPool.setParent(datastoreClusterPool.getId());
    // Fresh capacity/usage figures from the agent; used = capacity - available.
    childPool.setStatus(StoragePoolStatus.Up);
    childPool.setCapacityBytes(poolInfo.getCapacityBytes());
    childPool.setUsedBytes(poolInfo.getCapacityBytes() - poolInfo.getAvailableBytes());
    Map<String, String> details = new HashMap<>();
    String reportedPoolType = childDataStoreAnswer.getPoolType();
    if (org.apache.commons.lang.StringUtils.isNotEmpty(reportedPoolType)) {
        details.put("pool_type", reportedPoolType);
    }
    List<String> inheritedTags = _storagePoolTagsDao.getStoragePoolTags(datastoreClusterPool.getId());
    _storagePoolDao.persist(childPool, details, inheritedTags);
    return childPool;
}
/**
 * Detaches child storage pools that vCenter no longer reports under the datastore
 * cluster. For each such pool: sends SyncVolumePathCommand for every attached,
 * Ready volume so the DB picks up the volume's current datastore/path/chain info,
 * then clears the pool's parent (parent = 0) so it becomes a standalone pool —
 * mirroring vCenter's behaviour when a datastore is pulled out of a cluster.
 *
 * @param childDatastoreUUIDs uuids of child pools that were removed from the cluster
 * @throws CloudRuntimeException if the agent fails to answer or rejects the command
 */
private void handleRemoveChildStoragePoolFromDatastoreCluster(Set<String> childDatastoreUUIDs) {
    for (String childDatastoreUUID : childDatastoreUUIDs) {
        // NOTE(review): findPoolByUUID may return null if the row vanished since the
        // caller built this set — that would NPE below; confirm this cannot race.
        StoragePoolVO dataStoreVO = _storagePoolDao.findPoolByUUID(childDatastoreUUID);
        // Only volumes attached to a VM and in Ready state need a path re-sync.
        List<VolumeVO> allVolumes = _volumeDao.findByPoolId(dataStoreVO.getId());
        allVolumes.removeIf(volumeVO -> volumeVO.getInstanceId() == null);
        allVolumes.removeIf(volumeVO -> volumeVO.getState() != Volume.State.Ready);
        for (VolumeVO volume : allVolumes) {
            VMInstanceVO vmInstance = _vmInstanceDao.findById(volume.getInstanceId());
            if (vmInstance == null) {
                continue;
            }
            long volumeId = volume.getId();
            // Prefer the VM's current host; fall back to where it last ran.
            Long hostId = vmInstance.getHostId();
            if (hostId == null) {
                hostId = vmInstance.getLastHostId();
            }
            // NOTE(review): hostId may still be null if the VM never ran on a host —
            // findById(null)/easySend(null, ...) would then fail; confirm upstream
            // filtering guarantees a host here.
            HostVO hostVO = _hostDao.findById(hostId);
            // Prepare for the syncvolumepath command
            DataTO volTO = volFactory.getVolume(volume.getId()).getTO();
            DiskTO disk = new DiskTO(volTO, volume.getDeviceId(), volume.getPath(), volume.getVolumeType());
            Map<String, String> details = new HashMap<String, String>();
            details.put(DiskTO.PROTOCOL_TYPE, Storage.StoragePoolType.DatastoreCluster.toString());
            disk.setDetails(details);
            s_logger.debug(String.format("Attempting to process SyncVolumePathCommand for the volume %d on the host %d with state %s", volumeId, hostId, hostVO.getResourceState()));
            SyncVolumePathCommand cmd = new SyncVolumePathCommand(disk);
            final Answer answer = _agentMgr.easySend(hostId, cmd);
            // validate answer
            if (answer == null) {
                throw new CloudRuntimeException("Unable to get an answer to the SyncVolumePath command for volume " + volumeId);
            }
            if (!answer.getResult()) {
                throw new CloudRuntimeException("Unable to process SyncVolumePathCommand for the volume" + volumeId + " to the host " + hostId + " due to " + answer.getDetails());
            }
            assert (answer instanceof SyncVolumePathAnswer) : "Well, now why won't you actually return the SyncVolumePathAnswer when it's SyncVolumePathCommand? volume=" +
                    volume.getUuid() + "Host=" + hostId;
            // check for the changed details of volume and update database
            VolumeVO volumeVO = _volumeDao.findById(volumeId);
            // The agent reports the volume's current datastore by name; map it back to
            // a pool row so the volume's pool_id follows any storage-DRS relocation.
            String datastoreName = answer.getContextParam("datastoreName");
            if (datastoreName != null) {
                StoragePoolVO storagePoolVO = _storagePoolDao.findByUuid(datastoreName);
                if (storagePoolVO != null) {
                    volumeVO.setPoolId(storagePoolVO.getId());
                } else {
                    s_logger.warn(String.format("Unable to find datastore %s while updating the new datastore of the volume %d", datastoreName, volumeId));
                }
            }
            String volumePath = answer.getContextParam("volumePath");
            if (volumePath != null) {
                volumeVO.setPath(volumePath);
            }
            String chainInfo = answer.getContextParam("chainInfo");
            if (chainInfo != null) {
                volumeVO.setChainInfo(chainInfo);
            }
            _volumeDao.update(volumeVO.getId(), volumeVO);
        }
        // parent = 0 turns the child back into a standalone primary storage pool.
        dataStoreVO.setParent(0L);
        _storagePoolDao.update(dataStoreVO.getId(), dataStoreVO);
    }
}
/**
 * Upserts the per-host record (local mount path) for a pool and refreshes the pool's
 * capacity/used bytes from the agent's ModifyStoragePoolAnswer.
 *
 * @param pool      the storage pool whose host record and byte counters to update
 * @param hostId    host the answer came from
 * @param mspAnswer agent answer carrying localPath, capacity and available bytes
 */
private void updateStoragePoolHostVOAndBytes(StoragePool pool, long hostId, ModifyStoragePoolAnswer mspAnswer) {
    StoragePoolHostVO poolHost = _storagePoolHostDao.findByPoolHost(pool.getId(), hostId);
    if (poolHost == null) {
        // "//" can appear when path components are joined naively; normalise to "/".
        poolHost = new StoragePoolHostVO(pool.getId(), hostId, mspAnswer.getPoolInfo().getLocalPath().replaceAll("//", "/"));
        _storagePoolHostDao.persist(poolHost);
    } else {
        // NOTE(review): this in-memory setLocalPath is never written back via the DAO,
        // so a changed local path on an existing record is lost — confirm whether an
        // update call is missing here.
        poolHost.setLocalPath(mspAnswer.getPoolInfo().getLocalPath().replaceAll("//", "/"));
    }
    // used = capacity - available, as reported by the agent.
    StoragePoolVO poolVO = _storagePoolDao.findById(pool.getId());
    poolVO.setUsedBytes(mspAnswer.getPoolInfo().getCapacityBytes() - mspAnswer.getPoolInfo().getAvailableBytes());
    poolVO.setCapacityBytes(mspAnswer.getPoolInfo().getCapacityBytes());
    _storagePoolDao.update(pool.getId(), poolVO);
}
protected class StorageGarbageCollector extends ManagedContextRunnable {
public StorageGarbageCollector() {

View File

@ -35,6 +35,7 @@ import org.springframework.stereotype.Component;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.ModifyStoragePoolCommand;
import com.cloud.agent.api.ModifyStoragePoolAnswer;
import com.cloud.alert.AlertManager;
import com.cloud.host.HostVO;
import com.cloud.host.Status;
@ -100,6 +101,8 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation {
ManagementServer server;
@Inject
DataStoreProviderManager providerMgr;
@Inject
StorageManager storageManager;
@Override
public boolean maintain(DataStore store) {
@ -162,6 +165,10 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation {
if (s_logger.isDebugEnabled()) {
s_logger.debug("ModifyStoragePool false succeeded");
}
if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster) {
s_logger.debug(String.format("Started synchronising datastore cluster storage pool %s with vCenter", pool.getUuid()));
storageManager.syncDatastoreClusterStoragePool(pool.getId(), ((ModifyStoragePoolAnswer) answer).getDatastoreClusterChildren(), host.getId());
}
}
}
// check to see if other ps exist
@ -323,6 +330,10 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation {
if (s_logger.isDebugEnabled()) {
s_logger.debug("ModifyStoragePool add succeeded");
}
if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster) {
s_logger.debug(String.format("Started synchronising datastore cluster storage pool %s with vCenter", pool.getUuid()));
storageManager.syncDatastoreClusterStoragePool(pool.getId(), ((ModifyStoragePoolAnswer) answer).getDatastoreClusterChildren(), host.getId());
}
}
}

View File

@ -95,6 +95,7 @@ known_categories = {
'StorageMaintenance': 'Storage Pool',
'StoragePool': 'Storage Pool',
'StorageProvider': 'Storage Pool',
'syncStoragePool': 'Storage Pool',
'SecurityGroup': 'Security Group',
'SSH': 'SSH',
'register': 'Registration',