Change storage pool scope from Cluster to Zone and vice versa (#8875)

* New feature: Change storage pool scope

* Added checks for Ceph/RBD

* Update op_host_capacity table on primary storage scope change

* Storage pool scope change integration test

* Pull 8875: Addressed review comments

* Pull 8875: remove storage checks, AbstractPrimaryDataStoreLifeCycleImpl class

* Pull 8875: Fixed integration test failure

* Pull 8875: Review comments

* Pull 8875: review comments + broke changeStoragePoolScope into smaller functions

* Added UT for changeStoragePoolScope

* Rename AbstractPrimaryDataStoreLifeCycleImpl to BasePrimaryDataStoreLifeCycleImpl

* Pull 8875: Dao review comments

* Pull 8875: Rename changeStoragePoolScope.vue to ChangeStoragePoolScope.vue

* Pull 8875: Created a new smoke test file + a single warning msg in the UI

* Pull 8875: Added cleanup in test_primary_storage_scope.py

* Pull 8875: Typo in en.json

* Pull 8875: cleanup array in test_primary_storage_scope.py

* Pull 8875: Removing extra whitespace at EOF of StorageManagerImplTest

* Pull 8875: Added UT for PrimaryDataStoreHelper and BasePrimaryDataStoreLifeCycleImpl

* Pull 8875: Added license header

* Pull 8875: Fixed SQL query for vmStates

* Pull 8875: Changed icon plus info on disabled mode in apidoc

* Pull 8875: Change scope should not work for local storage

* Pull 8875: Change scope completion event

* Pull 8875: Added api findAffectedVmsForStorageScopeChange

* Pull 8875: Added UT for findAffectedVmsForStorageScopeChange and removed listByPoolIdVMStatesNotInCluster

* Pull 8875: Review comments + Vm name in response

* Pull 8875: listByVmsNotInClusterUsingPool was returning duplicate VM entries when multiple volumes of a VM satisfied the criteria

* Pull 8875: fixed listAffectedVmsForStorageScopeChange UT

* listAffectedVmsForStorageScopeChange should work if the pool is not disabled

* Fix listAffectedVmsForStorageScopeChangeTest UT

* Pull 8875: add volume.removed not null check in VmsNotInClusterUsingPool query

* Pull 8875: minor refactoring in changeStoragePoolScopeToCluster

* Update server/src/main/java/com/cloud/storage/StorageManagerImpl.java

* fix eof

* changeStoragePoolScopeToZone should connect pool to all Up hosts

Co-authored-by: Suresh Kumar Anaparti <sureshkumar.anaparti@gmail.com>
Authored by Abhisar Sinha on 2024-06-29 10:03:34 +05:30; committed by GitHub
parent 2ca1b474bd
commit 063dc60114
42 changed files with 1642 additions and 51 deletions
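For reviewers: the intended admin workflow with the two new APIs is to list the VMs that block a Zone-to-Cluster change, stop or migrate them, disable the pool, change the scope, and re-enable the pool. The sketch below drives that sequence over the HTTP API. It is illustrative only: the endpoint and UUIDs are placeholders, and real calls need the apiKey/signature handling that an SDK or the CloudMonkey CLI performs; the parameter names match the @APICommand definitions in this commit.

// Hypothetical workflow driver; BASE, poolId and clusterId are placeholders.
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ChangeScopeWorkflowSketch {
    private static final String BASE = "http://mgmt-server:8080/client/api"; // placeholder endpoint

    private static String call(HttpClient http, String query) throws Exception {
        HttpRequest req = HttpRequest.newBuilder(URI.create(BASE + "?" + query + "&response=json")).GET().build();
        return http.send(req, HttpResponse.BodyHandlers.ofString()).body();
    }

    public static void main(String[] args) throws Exception {
        HttpClient http = HttpClient.newHttpClient();
        String poolId = "POOL-UUID";       // placeholder
        String clusterId = "CLUSTER-UUID"; // placeholder

        // Zone -> Cluster only: these VMs must be stopped (system VMs destroyed) first.
        System.out.println(call(http, "command=listAffectedVmsForStorageScopeChange&storageid=" + poolId + "&clusterid=" + clusterId));

        // The pool must be in Disabled state before the scope change.
        call(http, "command=updateStoragePool&id=" + poolId + "&enabled=false");

        // Narrow to cluster scope; use scope=zone (no clusterid) for the reverse direction.
        call(http, "command=changeStoragePoolScope&id=" + poolId + "&scope=cluster&clusterid=" + clusterId);

        // Re-enable the pool once the async job completes.
        call(http, "command=updateStoragePool&id=" + poolId + "&enabled=true");
    }
}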

EventTypes.java

@@ -451,6 +451,7 @@ public class EventTypes {
public static final String EVENT_ENABLE_PRIMARY_STORAGE = "ENABLE.PS";
public static final String EVENT_DISABLE_PRIMARY_STORAGE = "DISABLE.PS";
public static final String EVENT_SYNC_STORAGE_POOL = "SYNC.STORAGE.POOL";
public static final String EVENT_CHANGE_STORAGE_POOL_SCOPE = "CHANGE.STORAGE.POOL.SCOPE";
// VPN
public static final String EVENT_REMOTE_ACCESS_VPN_CREATE = "VPN.REMOTE.ACCESS.CREATE";
@@ -1000,6 +1001,7 @@ public class EventTypes {
// Primary storage pool
entityEventDetails.put(EVENT_ENABLE_PRIMARY_STORAGE, StoragePool.class);
entityEventDetails.put(EVENT_DISABLE_PRIMARY_STORAGE, StoragePool.class);
entityEventDetails.put(EVENT_CHANGE_STORAGE_POOL_SCOPE, StoragePool.class);
// VPN
entityEventDetails.put(EVENT_REMOTE_ACCESS_VPN_CREATE, RemoteAccessVpn.class);

StorageService.java

@@ -21,6 +21,7 @@ import java.net.UnknownHostException;
import java.util.Map;
import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd;
import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd;
import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd;
@@ -35,6 +36,7 @@ import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd;
import com.cloud.exception.DiscoveryException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.PermissionDeniedException;
import com.cloud.exception.ResourceInUseException;
import com.cloud.exception.ResourceUnavailableException;
import org.apache.cloudstack.api.command.admin.storage.heuristics.CreateSecondaryStorageSelectorCmd;
@@ -130,4 +132,6 @@ public interface StorageService {
boolean deleteObjectStore(DeleteObjectStoragePoolCmd cmd);
ObjectStore updateObjectStore(Long id, UpdateObjectStoragePoolCmd cmd);
void changeStoragePoolScope(ChangeStoragePoolScopeCmd cmd) throws IllegalArgumentException, InvalidParameterValueException, PermissionDeniedException;
}

ChangeStoragePoolScopeCmd.java

@@ -0,0 +1,98 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.admin.storage;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiCommandResourceType;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseAsyncCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.response.ClusterResponse;
import org.apache.cloudstack.api.response.StoragePoolResponse;
import org.apache.cloudstack.api.response.SuccessResponse;
import org.apache.cloudstack.context.CallContext;
import com.cloud.event.EventTypes;
import com.cloud.storage.StoragePool;
@APICommand(name = "changeStoragePoolScope", description = "Changes the scope of a storage pool when the pool is in Disabled state." +
"This feature is officially tested and supported for Hypervisors: KVM and VMware, Protocols: NFS and Ceph, and Storage Provider: DefaultPrimary. " +
"There might be extra steps involved to make this work for other hypervisors and storage options.",
responseObject = SuccessResponse.class, since= "4.19.1", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class ChangeStoragePoolScopeCmd extends BaseAsyncCmd {
@Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = StoragePoolResponse.class, required = true, description = "the Id of the storage pool")
private Long id;
@Parameter(name = ApiConstants.SCOPE, type = CommandType.STRING, required = true, description = "the scope of the storage: cluster or zone")
private String scope;
@Parameter(name = ApiConstants.CLUSTER_ID, type = CommandType.UUID, entityType = ClusterResponse.class, description = "the Id of the cluster to use if scope is being set to Cluster")
private Long clusterId;
@Override
public ApiCommandResourceType getApiResourceType() {
return ApiCommandResourceType.StoragePool;
}
@Override
public Long getApiResourceId() {
return getId();
}
@Override
public String getEventType() {
return EventTypes.EVENT_CHANGE_STORAGE_POOL_SCOPE;
}
@Override
public String getEventDescription() {
String description = "Change storage pool scope. Storage pool Id: ";
StoragePool pool = _entityMgr.findById(StoragePool.class, getId());
if (pool != null) {
description += pool.getUuid();
} else {
description += getId();
}
description += " to " + getScope();
return description;
}
@Override
public void execute() {
_storageService.changeStoragePoolScope(this);
SuccessResponse response = new SuccessResponse(getCommandName());
this.setResponseObject(response);
}
@Override
public long getEntityOwnerId() {
return CallContext.current().getCallingAccountId();
}
public Long getId() {
return id;
}
public String getScope() {
return scope;
}
public Long getClusterId() {
return clusterId;
}
}

ListAffectedVmsForStorageScopeChangeCmd.java

@@ -0,0 +1,77 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.api.command.admin.vm;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseListCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.response.ClusterResponse;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.api.response.StoragePoolResponse;
import org.apache.cloudstack.api.response.VirtualMachineResponse;
import com.cloud.vm.VirtualMachine;
@APICommand(name = "listAffectedVmsForStorageScopeChange",
description = "List user and system VMs that need to be stopped and destroyed respectively for changing the scope of the storage pool from Zone to Cluster.",
responseObject = VirtualMachineResponse.class,
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.19.1",
authorized = {RoleType.Admin})
public class ListAffectedVmsForStorageScopeChangeCmd extends BaseListCmd {
@Parameter(name = ApiConstants.CLUSTER_ID,
type = CommandType.UUID,
entityType = ClusterResponse.class,
required = true,
description = "the Id of the cluster the scope of the storage pool is being changed to")
private Long clusterIdForScopeChange;
@Parameter(name = ApiConstants.STORAGE_ID,
type = CommandType.UUID,
entityType = StoragePoolResponse.class,
required = true,
description = "the Id of the storage pool on which change scope operation is being done")
private Long storageId;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public Long getClusterIdForScopeChange() {
return clusterIdForScopeChange;
}
public Long getStorageId() {
return storageId;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
@Override
public void execute() {
ListResponse<VirtualMachineResponse> response = _queryService.listAffectedVmsForStorageScopeChange(this);
response.setResponseName(getCommandName());
response.setObjectName(VirtualMachine.class.getSimpleName().toLowerCase());
setResponseObject(response);
}
}

VirtualMachineResponse.java

@@ -0,0 +1,124 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.response;
import org.apache.cloudstack.api.BaseResponse;
import org.apache.cloudstack.api.EntityReference;
import com.cloud.serializer.Param;
import com.cloud.vm.VirtualMachine;
import com.google.gson.annotations.SerializedName;
@EntityReference(value = VirtualMachine.class)
public class VirtualMachineResponse extends BaseResponse {
@SerializedName("id")
@Param(description = "the ID of the VM")
private String id;
@SerializedName("type")
@Param(description = "the type of VM")
private String type;
@SerializedName("name")
@Param(description = "the name of the VM")
private String name;
@SerializedName("clusterid")
@Param(description = "the cluster ID for the VM")
private String clusterId;
@SerializedName("clustername")
@Param(description = "the cluster name for the VM")
private String clusterName;
@SerializedName("hostid")
@Param(description = "the host ID for the VM")
private String hostId;
@SerializedName("hostname")
@Param(description = "the hostname for the VM")
private String hostName;
@Override
public String getObjectId() {
return this.getId();
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getVmType() {
return type;
}
public void setVmType(String type) {
this.type = type;
}
public String getVmName() {
return name;
}
public void setVmName(String name) {
this.name = name;
}
public String getClusterId() {
return clusterId;
}
public void setClusterId(String clusterId) {
this.clusterId = clusterId;
}
public String getClusterName() {
return clusterName;
}
public void setClusterName(String clusterName) {
this.clusterName = clusterName;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getHostId() {
return hostId;
}
public void setHostId(String hostId) {
this.hostId = hostId;
}
public String getHostName() {
return hostName;
}
public void setHostName(String hostName) {
this.hostName = hostName;
}
}

QueryService.java

@@ -52,6 +52,7 @@ import org.apache.cloudstack.api.command.user.snapshot.CopySnapshotCmd;
import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotsCmd;
import org.apache.cloudstack.api.command.user.tag.ListTagsCmd;
import org.apache.cloudstack.api.command.user.template.ListTemplatesCmd;
import org.apache.cloudstack.api.command.admin.vm.ListAffectedVmsForStorageScopeChangeCmd;
import org.apache.cloudstack.api.command.user.vm.ListVMsCmd;
import org.apache.cloudstack.api.command.user.vmgroup.ListVMGroupsCmd;
import org.apache.cloudstack.api.command.user.volume.ListResourceDetailsCmd;
@@ -89,6 +90,7 @@ import org.apache.cloudstack.api.response.StorageTagResponse;
import org.apache.cloudstack.api.response.TemplateResponse;
import org.apache.cloudstack.api.response.UserResponse;
import org.apache.cloudstack.api.response.UserVmResponse;
import org.apache.cloudstack.api.response.VirtualMachineResponse;
import org.apache.cloudstack.api.response.VolumeResponse;
import org.apache.cloudstack.api.response.ZoneResponse;
import org.apache.cloudstack.framework.config.ConfigKey;
@@ -140,6 +142,8 @@ public interface QueryService {
ListResponse<UserVmResponse> searchForUserVMs(ListVMsCmd cmd);
ListResponse<VirtualMachineResponse> listAffectedVmsForStorageScopeChange(ListAffectedVmsForStorageScopeChangeCmd cmd);
ListResponse<SecurityGroupResponse> searchForSecurityGroups(ListSecurityGroupsCmd cmd);
ListResponse<DomainRouterResponse> searchForRouters(ListRoutersCmd cmd);

PrimaryDataStoreLifeCycle.java

@@ -20,6 +20,7 @@ package org.apache.cloudstack.engine.subsystem.api.storage;
import java.util.Map;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.storage.StoragePool;
public interface PrimaryDataStoreLifeCycle extends DataStoreLifeCycle {
@@ -29,4 +30,6 @@ public interface PrimaryDataStoreLifeCycle extends DataStoreLifeCycle {
void updateStoragePool(StoragePool storagePool, Map<String, String> details);
void enableStoragePool(DataStore store);
void disableStoragePool(DataStore store);
void changeStoragePoolScopeToZone(DataStore store, ClusterScope clusterScope, Hypervisor.HypervisorType hypervisorType);
void changeStoragePoolScopeToCluster(DataStore store, ClusterScope clusterScope, Hypervisor.HypervisorType hypervisorType);
}

ResourceManager.java

@@ -134,6 +134,10 @@ public interface ResourceManager extends ResourceService, Configurable {
public List<HostVO> listAllHostsInAllZonesByType(Type type);
public List<HostVO> listAllHostsInOneZoneNotInClusterByHypervisor(final HypervisorType type, long dcId, long clusterId);
public List<HostVO> listAllHostsInOneZoneNotInClusterByHypervisors(List<HypervisorType> types, long dcId, long clusterId);
public List<HypervisorType> listAvailHypervisorInZone(Long hostId, Long zoneId);
public HostVO findHostByGuid(String guid);

CapacityVO.java

@@ -132,8 +132,8 @@ public class CapacityVO implements Capacity {
return podId;
}
public void setPodId(long podId) {
this.podId = new Long(podId);
public void setPodId(Long podId) {
this.podId = podId;
}
@Override
@@ -141,8 +141,8 @@
return clusterId;
}
public void setClusterId(long clusterId) {
this.clusterId = new Long(clusterId);
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
@Override

StoragePoolHostDao.java

@@ -41,4 +41,6 @@ public interface StoragePoolHostDao extends GenericDao<StoragePoolHostVO, Long>
public void deleteStoragePoolHostDetails(long hostId, long poolId);
List<StoragePoolHostVO> listByHostId(long hostId);
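// Pool-host mappings whose host sits outside the given cluster; used when narrowing a pool's scope.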
Pair<List<StoragePoolHostVO>, Integer> listByPoolIdNotInCluster(long clusterId, long poolId);
}

StoragePoolHostDaoImpl.java

@@ -23,13 +23,19 @@ import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import javax.annotation.PostConstruct;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.host.HostVO;
import com.cloud.host.Status;
import com.cloud.host.dao.HostDao;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.utils.Pair;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.JoinBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.TransactionLegacy;
@@ -42,6 +48,11 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase<StoragePoolHostVO, Lo
protected final SearchBuilder<StoragePoolHostVO> HostSearch;
protected final SearchBuilder<StoragePoolHostVO> PoolHostSearch;
protected SearchBuilder<StoragePoolHostVO> poolNotInClusterSearch;
@Inject
HostDao hostDao;
protected static final String HOST_FOR_POOL_SEARCH = "SELECT * FROM storage_pool_host_ref ph, host h where ph.host_id = h.id and ph.pool_id=? and h.status=? ";
protected static final String HOSTS_FOR_POOLS_SEARCH = "SELECT DISTINCT(ph.host_id) FROM storage_pool_host_ref ph, host h WHERE ph.host_id = h.id AND h.status = 'Up' AND resource_state = 'Enabled' AND ph.pool_id IN (?)";
@@ -70,6 +81,15 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase<StoragePoolHostVO, Lo
}
@PostConstruct
public void init(){
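// Mappings of this pool to hosts whose cluster differs from the supplied cluster id.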
poolNotInClusterSearch = createSearchBuilder();
poolNotInClusterSearch.and("poolId", poolNotInClusterSearch.entity().getPoolId(), SearchCriteria.Op.EQ);
SearchBuilder<HostVO> hostSearch = hostDao.createSearchBuilder();
poolNotInClusterSearch.join("hostSearch", hostSearch, hostSearch.entity().getId(), poolNotInClusterSearch.entity().getHostId(), JoinBuilder.JoinType.INNER);
hostSearch.and("clusterId", hostSearch.entity().getClusterId(), SearchCriteria.Op.NEQ);
}
@Override
public List<StoragePoolHostVO> listByPoolId(long id) {
SearchCriteria<StoragePoolHostVO> sc = PoolSearch.create();
@@ -196,4 +216,12 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase<StoragePoolHostVO, Lo
remove(sc);
txn.commit();
}
@Override
public Pair<List<StoragePoolHostVO>, Integer> listByPoolIdNotInCluster(long clusterId, long poolId) {
SearchCriteria<StoragePoolHostVO> sc = poolNotInClusterSearch.create();
sc.setParameters("poolId", poolId);
sc.setJoinParameters("hostSearch", "clusterId", clusterId);
return searchAndCount(sc, null);
}
}

VolumeDaoImpl.java

@@ -72,8 +72,9 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
protected GenericSearchBuilder<VolumeVO, SumCount> primaryStorageSearch2;
protected GenericSearchBuilder<VolumeVO, SumCount> secondaryStorageSearch;
private final SearchBuilder<VolumeVO> poolAndPathSearch;
@Inject
ResourceTagDao _tagsDao;
ResourceTagDao tagsDao;
// need to account for zone-wide primary storage where storage_pool has
// null-value pod and cluster, where hypervisor information is stored in
@@ -493,7 +494,6 @@
poolAndPathSearch.and("poolId", poolAndPathSearch.entity().getPoolId(), Op.EQ);
poolAndPathSearch.and("path", poolAndPathSearch.entity().getPath(), Op.EQ);
poolAndPathSearch.done();
}
@Override
@@ -719,7 +719,7 @@
s_logger.debug(String.format("Removing volume %s from DB", id));
VolumeVO entry = findById(id);
if (entry != null) {
_tagsDao.removeByIdAndType(id, ResourceObjectType.Volume);
tagsDao.removeByIdAndType(id, ResourceObjectType.Volume);
}
boolean result = super.remove(id);
@@ -742,7 +742,7 @@
destVol.setInstanceId(instanceId);
update(srcVolId, srcVol);
update(destVolId, destVol);
_tagsDao.updateResourceId(srcVolId, destVolId, ResourceObjectType.Volume);
tagsDao.updateResourceId(srcVolId, destVolId, ResourceObjectType.Volume);
} catch (Exception e) {
throw new CloudRuntimeException("Unable to persist the sequence number for this host");
}

VMInstanceDao.java

@@ -165,4 +165,6 @@ public interface VMInstanceDao extends GenericDao<VMInstanceVO, Long>, StateDao<
void updateSystemVmTemplateId(long templateId, Hypervisor.HypervisorType hypervisorType);
List<VMInstanceVO> listByHostOrLastHostOrHostPod(List<Long> hostIds, long podId);
Pair<List<VMInstanceVO>, Integer> listByVmsNotInClusterUsingPool(long clusterId, long poolId);
}

VMInstanceDaoImpl.java

@@ -24,6 +24,7 @@ import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import javax.annotation.PostConstruct;
import javax.inject.Inject;
@@ -35,6 +36,8 @@ import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.server.ResourceTag.ResourceObjectType;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.tags.dao.ResourceTagDao;
import com.cloud.utils.DateUtil;
import com.cloud.utils.Pair;
@@ -97,11 +100,16 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
protected SearchBuilder<VMInstanceVO> NotMigratingSearch;
protected SearchBuilder<VMInstanceVO> BackupSearch;
protected SearchBuilder<VMInstanceVO> LastHostAndStatesSearch;
protected SearchBuilder<VMInstanceVO> VmsNotInClusterUsingPool;
@Inject
ResourceTagDao _tagsDao;
ResourceTagDao tagsDao;
@Inject
NicDao _nicDao;
NicDao nicDao;
@Inject
VolumeDao volumeDao;
@Inject
HostDao hostDao;
protected Attribute _updateTimeAttr;
@@ -278,7 +286,7 @@
_updateTimeAttr = _allAttributes.get("updateTime");
assert _updateTimeAttr != null : "Couldn't get this updateTime attribute";
SearchBuilder<NicVO> nicSearch = _nicDao.createSearchBuilder();
SearchBuilder<NicVO> nicSearch = nicDao.createSearchBuilder();
nicSearch.and("networkId", nicSearch.entity().getNetworkId(), SearchCriteria.Op.EQ);
nicSearch.and("removedNic", nicSearch.entity().getRemoved(), SearchCriteria.Op.NULL);
@@ -307,6 +315,16 @@
LastHostAndStatesSearch.and("states", LastHostAndStatesSearch.entity().getState(), Op.IN);
LastHostAndStatesSearch.done();
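// VMs in active states that use the pool through at least one live volume while placed on a host outside the target cluster.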
VmsNotInClusterUsingPool = createSearchBuilder();
SearchBuilder<VolumeVO> volumeSearch = volumeDao.createSearchBuilder();
volumeSearch.and("poolId", volumeSearch.entity().getPoolId(), Op.EQ);
volumeSearch.and("removed", volumeSearch.entity().getRemoved(), Op.NULL);
VmsNotInClusterUsingPool.join("volumeSearch", volumeSearch, volumeSearch.entity().getInstanceId(), VmsNotInClusterUsingPool.entity().getId(), JoinType.INNER);
SearchBuilder<HostVO> hostSearch2 = hostDao.createSearchBuilder();
hostSearch2.and("clusterId", hostSearch2.entity().getClusterId(), SearchCriteria.Op.NEQ);
VmsNotInClusterUsingPool.join("hostSearch2", hostSearch2, hostSearch2.entity().getId(), VmsNotInClusterUsingPool.entity().getHostId(), JoinType.INNER);
VmsNotInClusterUsingPool.and("vmStates", VmsNotInClusterUsingPool.entity().getState(), Op.IN);
VmsNotInClusterUsingPool.done();
}
@Override
@@ -836,7 +854,7 @@
public List<VMInstanceVO> listNonRemovedVmsByTypeAndNetwork(long networkId, VirtualMachine.Type... types) {
if (NetworkTypeSearch == null) {
SearchBuilder<NicVO> nicSearch = _nicDao.createSearchBuilder();
SearchBuilder<NicVO> nicSearch = nicDao.createSearchBuilder();
nicSearch.and("networkId", nicSearch.entity().getNetworkId(), SearchCriteria.Op.EQ);
NetworkTypeSearch = createSearchBuilder();
@@ -873,7 +891,7 @@
txn.start();
VMInstanceVO vm = findById(id);
if (vm != null && vm.getType() == Type.User) {
_tagsDao.removeByIdAndType(id, ResourceObjectType.UserVm);
tagsDao.removeByIdAndType(id, ResourceObjectType.UserVm);
}
boolean result = super.remove(id);
txn.commit();
@@ -1018,4 +1036,15 @@
sc.setParameters("podId", String.valueOf(podId));
return listBy(sc);
}
@Override
public Pair<List<VMInstanceVO>, Integer> listByVmsNotInClusterUsingPool(long clusterId, long poolId) {
SearchCriteria<VMInstanceVO> sc = VmsNotInClusterUsingPool.create();
sc.setParameters("vmStates", State.Starting, State.Running, State.Stopping, State.Migrating, State.Restoring);
sc.setJoinParameters("volumeSearch", "poolId", poolId);
sc.setJoinParameters("hostSearch2", "clusterId", clusterId);
List<VMInstanceVO> vms = search(sc, null);
List<VMInstanceVO> uniqueVms = vms.stream().distinct().collect(Collectors.toList());
return new Pair<>(uniqueVms, uniqueVms.size());
}
}
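A note on the distinct() call above: the inner join on volumes yields one row per matching volume, so a VM with several volumes on the pool comes back multiple times; Stream.distinct() collapses the duplicates. A toy illustration of that dedup step, not CloudStack code (a record stands in for the VM row, since distinct() relies on value-based equals):

import java.util.List;

public class DistinctVmRowsExample {
    // Stand-in for a joined VM row; records get value-based equals()/hashCode().
    record VmRow(long id, String name) {}

    public static void main(String[] args) {
        // A VM with two volumes on the pool appears twice in the join result.
        List<VmRow> joined = List.of(new VmRow(1, "vm-1"), new VmRow(1, "vm-1"), new VmRow(2, "vm-2"));
        List<VmRow> unique = joined.stream().distinct().toList();
        System.out.println(unique); // [VmRow[id=1, name=vm-1], VmRow[id=2, name=vm-2]]
    }
}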

PrimaryDataStoreHelper.java

@@ -28,31 +28,34 @@ import javax.inject.Inject;
import org.apache.cloudstack.annotation.AnnotationService;
import org.apache.cloudstack.annotation.dao.AnnotationDao;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.capacity.Capacity;
import com.cloud.capacity.CapacityVO;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.crypt.DBEncryptionUtil;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.TransactionCallbackNoReturn;
import com.cloud.utils.db.TransactionLegacy;
import com.cloud.utils.db.TransactionStatus;
import com.cloud.utils.exception.CloudRuntimeException;
@Component
@@ -265,4 +268,48 @@ public class PrimaryDataStoreHelper {
return true;
}
public void switchToZone(DataStore store, HypervisorType hypervisorType) {
StoragePoolVO pool = dataStoreDao.findById(store.getId());
CapacityVO capacity = _capacityDao.findByHostIdType(store.getId(), Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED);
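// Atomically widen the pool to zone scope: clear pod/cluster on the pool row and on its allocated-capacity row.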
Transaction.execute(new TransactionCallbackNoReturn() {
@Override
public void doInTransactionWithoutResult(TransactionStatus status) {
pool.setScope(ScopeType.ZONE);
pool.setPodId(null);
pool.setClusterId(null);
pool.setHypervisor(hypervisorType);
dataStoreDao.update(pool.getId(), pool);
capacity.setPodId(null);
capacity.setClusterId(null);
_capacityDao.update(capacity.getId(), capacity);
}
});
s_logger.debug("Scope of storage pool id=" + pool.getId() + " is changed to zone");
}
public void switchToCluster(DataStore store, ClusterScope clusterScope) {
List<StoragePoolHostVO> hostPoolRecords = storagePoolHostDao.listByPoolIdNotInCluster(clusterScope.getScopeId(), store.getId()).first();
StoragePoolVO pool = dataStoreDao.findById(store.getId());
CapacityVO capacity = _capacityDao.findByHostIdType(store.getId(), Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED);
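// Atomically narrow the pool: drop host mappings outside the target cluster, then pin the pool and capacity rows to it.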
Transaction.execute(new TransactionCallbackNoReturn() {
@Override
public void doInTransactionWithoutResult(TransactionStatus status) {
if (hostPoolRecords != null) {
for (StoragePoolHostVO host : hostPoolRecords) {
storagePoolHostDao.deleteStoragePoolHostDetails(host.getHostId(), host.getPoolId());
}
}
pool.setScope(ScopeType.CLUSTER);
pool.setPodId(clusterScope.getPodId());
pool.setClusterId(clusterScope.getScopeId());
dataStoreDao.update(pool.getId(), pool);
capacity.setPodId(clusterScope.getPodId());
capacity.setClusterId(clusterScope.getScopeId());
_capacityDao.update(capacity.getId(), capacity);
}
});
s_logger.debug("Scope of storage pool id=" + pool.getId() + " is changed to cluster id=" + clusterScope.getScopeId());
}
}

PrimaryDataStoreHelperTest.java

@@ -0,0 +1,114 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.volume.datastore;
import java.util.List;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.Spy;
import org.mockito.junit.MockitoJUnitRunner;
import com.cloud.capacity.Capacity;
import com.cloud.capacity.CapacityVO;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.Pair;
@RunWith(MockitoJUnitRunner.class)
public class PrimaryDataStoreHelperTest {
@Mock
private PrimaryDataStoreDao dataStoreDao;
@Mock
private CapacityDao capacityDao;
@Mock
private StoragePoolHostDao storagePoolHostDao;
@Spy
@InjectMocks
PrimaryDataStoreHelper dataStoreHelper;
private static final Long ZONE_ID = 1L;
private static final Long CLUSTER_ID = 2L;
private static final Long POD_ID = 3L;
private static final Long POOL_ID = 4L;
private static final Short capacityType = 0;
private static final Float usedPercentage = 0.0f;
@Test
public void testSwitchToZone() {
StoragePoolVO pool = new StoragePoolVO(POOL_ID, null, null, Storage.StoragePoolType.NetworkFilesystem, ZONE_ID, POD_ID, 0L, 0L, null, 0, null);
pool.setClusterId(CLUSTER_ID);
pool.setScope(ScopeType.CLUSTER);
CapacityVO capacity = new CapacityVO(ZONE_ID, POD_ID, CLUSTER_ID, capacityType, usedPercentage);
Mockito.when(dataStoreDao.findById(pool.getId())).thenReturn(pool);
Mockito.when(capacityDao.findByHostIdType(pool.getId(), Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED)).thenReturn(capacity);
DataStore storeMock = Mockito.mock(DataStore.class);
Mockito.when(storeMock.getId()).thenReturn(POOL_ID);
dataStoreHelper.switchToZone(storeMock, HypervisorType.KVM);
Assert.assertEquals(pool.getScope(), ScopeType.ZONE);
Assert.assertEquals(pool.getPodId(), null);
Assert.assertEquals(pool.getClusterId(), null);
Assert.assertEquals(pool.getHypervisor(), HypervisorType.KVM);
Assert.assertEquals(capacity.getPodId(), null);
Assert.assertEquals(capacity.getClusterId(), null);
}
@Test
public void testSwitchToCluster() {
StoragePoolVO pool = new StoragePoolVO(POOL_ID, null, null, Storage.StoragePoolType.NetworkFilesystem, ZONE_ID, null, 0L, 0L, null, 0, null);
pool.setScope(ScopeType.ZONE);
CapacityVO capacity = new CapacityVO(ZONE_ID, null, null, capacityType, usedPercentage);
ClusterScope clusterScope = new ClusterScope(CLUSTER_ID, POD_ID, ZONE_ID);
Pair<List<StoragePoolHostVO>, Integer> hostPoolRecords = new Pair<>(null, 0);
Mockito.when(storagePoolHostDao.listByPoolIdNotInCluster(CLUSTER_ID, POOL_ID)).thenReturn(hostPoolRecords);
Mockito.when(dataStoreDao.findById(pool.getId())).thenReturn(pool);
Mockito.when(capacityDao.findByHostIdType(pool.getId(), Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED)).thenReturn(capacity);
DataStore storeMock = Mockito.mock(DataStore.class);
Mockito.when(storeMock.getId()).thenReturn(POOL_ID);
dataStoreHelper.switchToCluster(storeMock, clusterScope);
Mockito.verify(storagePoolHostDao, Mockito.never()).deleteStoragePoolHostDetails(Mockito.anyLong(), Mockito.anyLong());
Assert.assertEquals(pool.getScope(), ScopeType.CLUSTER);
Assert.assertEquals(pool.getPodId(), POD_ID);
Assert.assertEquals(pool.getClusterId(), CLUSTER_ID);
Assert.assertEquals(capacity.getPodId(), POD_ID);
Assert.assertEquals(capacity.getClusterId(), CLUSTER_ID);
}
}

BasePrimaryDataStoreLifeCycleImpl.java

@@ -0,0 +1,106 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.lifecycle;
import java.util.Arrays;
import java.util.List;
import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
import org.apache.log4j.Logger;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.DeleteStoragePoolCommand;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.resource.ResourceManager;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.Pair;
public class BasePrimaryDataStoreLifeCycleImpl {
private static final Logger s_logger = Logger.getLogger(BasePrimaryDataStoreLifeCycleImpl.class);
@Inject
AgentManager agentMgr;
@Inject
protected ResourceManager resourceMgr;
@Inject
StorageManager storageMgr;
@Inject
PrimaryDataStoreHelper dataStoreHelper;
@Inject
protected HostDao hostDao;
@Inject
protected StoragePoolHostDao storagePoolHostDao;
private List<HostVO> getPoolHostsList(ClusterScope clusterScope, HypervisorType hypervisorType) {
List<HostVO> hosts;
if (hypervisorType != null) {
hosts = resourceMgr
.listAllHostsInOneZoneNotInClusterByHypervisor(hypervisorType, clusterScope.getZoneId(), clusterScope.getScopeId());
} else {
List<HypervisorType> hypervisorTypes = Arrays.asList(HypervisorType.KVM, HypervisorType.VMware);
hosts = resourceMgr
.listAllHostsInOneZoneNotInClusterByHypervisors(hypervisorTypes, clusterScope.getZoneId(), clusterScope.getScopeId());
}
return hosts;
}
public void changeStoragePoolScopeToZone(DataStore store, ClusterScope clusterScope, HypervisorType hypervisorType) {
List<HostVO> hosts = getPoolHostsList(clusterScope, hypervisorType);
s_logger.debug("Changing scope of the storage pool to Zone");
if (hosts != null) {
for (HostVO host : hosts) {
try {
storageMgr.connectHostToSharedPool(host.getId(), store.getId());
} catch (Exception e) {
s_logger.warn("Unable to establish a connection between " + host + " and " + store, e);
}
}
}
dataStoreHelper.switchToZone(store, hypervisorType);
}
public void changeStoragePoolScopeToCluster(DataStore store, ClusterScope clusterScope, HypervisorType hypervisorType) {
Pair<List<StoragePoolHostVO>, Integer> hostPoolRecords = storagePoolHostDao.listByPoolIdNotInCluster(clusterScope.getScopeId(), store.getId());
s_logger.debug("Changing scope of the storage pool to Cluster");
if (hostPoolRecords.second() > 0) {
StoragePool pool = (StoragePool) store;
for (StoragePoolHostVO host : hostPoolRecords.first()) {
DeleteStoragePoolCommand deleteCmd = new DeleteStoragePoolCommand(pool);
final Answer answer = agentMgr.easySend(host.getHostId(), deleteCmd);
if (answer != null) {
if (!answer.getResult()) {
s_logger.debug("Failed to delete storage pool: " + answer.getResult());
} else if (HypervisorType.KVM != hypervisorType) {
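// A successful unmount on one host is treated as sufficient for non-KVM hypervisors,
// so stop after the first success; for KVM the command is sent to every out-of-cluster host.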
break;
}
}
}
}
dataStoreHelper.switchToCluster(store, clusterScope);
}
}

BasePrimaryDataStoreLifeCycleImplTest.java

@@ -0,0 +1,127 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.lifecycle;
import static org.mockito.ArgumentMatchers.eq;
import java.util.Arrays;
import java.util.List;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.storage.datastore.PrimaryDataStoreImpl;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.Spy;
import org.mockito.junit.MockitoJUnitRunner;
import org.springframework.test.util.ReflectionTestUtils;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.DeleteStoragePoolCommand;
import com.cloud.host.HostVO;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.resource.ResourceManager;
import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.Pair;
@RunWith(MockitoJUnitRunner.class)
public class BasePrimaryDataStoreLifeCycleImplTest {
@Mock
private StoragePoolHostDao storagePoolHostDao;
@Mock
private PrimaryDataStoreHelper dataStoreHelper;
@Mock
private AgentManager agentManager;
@Mock
private ResourceManager resourceManager;
@Mock
private StorageManager storageManager;
@Spy
@InjectMocks
private BasePrimaryDataStoreLifeCycleImpl dataStoreLifeCycle;
private static final Long POOL_ID = 1L;
private static final Long CLUSTER_ID = 2L;
private static final Long POD_ID = 3L;
private static final Long ZONE_ID = 4L;
private static final Long HOST_ID = 5L;
private static ClusterScope clusterScope;
private static PrimaryDataStoreImpl store;
@BeforeClass
public static void init() {
clusterScope = new ClusterScope(CLUSTER_ID, POD_ID, ZONE_ID);
StoragePoolVO pool = new StoragePoolVO(POOL_ID, null, null, Storage.StoragePoolType.NetworkFilesystem, 0L, 0L, 0L, 0L, null, 0, null);
store = new PrimaryDataStoreImpl();
store.configure(pool, null, null);
}
@Test
public void testChangeStoragePoolScopeToZone() throws Exception {
Mockito.when(resourceManager.listAllHostsInOneZoneNotInClusterByHypervisor(HypervisorType.KVM, ZONE_ID, CLUSTER_ID)).thenReturn(null);
dataStoreLifeCycle.changeStoragePoolScopeToZone(store, clusterScope, HypervisorType.KVM);
Mockito.verify(dataStoreHelper, Mockito.times(1)).switchToZone(store, HypervisorType.KVM);
HostVO host = new HostVO(null);
ReflectionTestUtils.setField(host, "id", HOST_ID);
List<HypervisorType> hypervisorTypes = Arrays.asList(HypervisorType.KVM, HypervisorType.VMware);
Mockito.when(resourceManager.listAllHostsInOneZoneNotInClusterByHypervisors(hypervisorTypes, ZONE_ID, CLUSTER_ID)).thenReturn(Arrays.asList(host));
Mockito.when(storageManager.connectHostToSharedPool(HOST_ID, POOL_ID)).thenReturn(true);
dataStoreLifeCycle.changeStoragePoolScopeToZone(store, clusterScope, null);
Mockito.verify(dataStoreHelper, Mockito.times(1)).switchToZone(store, null);
}
@Test
public void testChangeStoragePoolScopeToCluster() {
Pair<List<StoragePoolHostVO>, Integer> hostPoolRecords = new Pair<>(null, 0);
Mockito.when(storagePoolHostDao.listByPoolIdNotInCluster(CLUSTER_ID, POOL_ID)).thenReturn(hostPoolRecords);
Mockito.doNothing().when(dataStoreHelper).switchToCluster(store, clusterScope);
dataStoreLifeCycle.changeStoragePoolScopeToCluster(store, clusterScope, HypervisorType.KVM);
hostPoolRecords.set(Arrays.asList(new StoragePoolHostVO(POOL_ID, HOST_ID, null)), 1);
Answer answer = new Answer(null, false, null);
Mockito.when(storagePoolHostDao.listByPoolIdNotInCluster(CLUSTER_ID, POOL_ID)).thenReturn(hostPoolRecords);
Mockito.when(agentManager.easySend(eq(HOST_ID), Mockito.any(DeleteStoragePoolCommand.class))).thenReturn(answer);
dataStoreLifeCycle.changeStoragePoolScopeToCluster(store, clusterScope, HypervisorType.KVM);
Mockito.verify(dataStoreHelper, Mockito.times(2)).switchToCluster(store, clusterScope);
}
}

AdaptiveDataStoreLifeCycleImpl.java

@@ -62,7 +62,7 @@ import com.cloud.host.Host;
/**
* Manages the lifecycle of a Managed Data Store in CloudStack
*/
public class AdaptiveDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
public class AdaptiveDataStoreLifeCycleImpl extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
@Inject
private PrimaryDataStoreDao _storagePoolDao;
private static final Logger s_logger = Logger.getLogger(AdaptiveDataStoreLifeCycleImpl.class);

ElastistorPrimaryDataStoreLifeCycle.java

@@ -65,7 +65,7 @@ import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.exception.CloudRuntimeException;
public class ElastistorPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle {
public class ElastistorPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
private static final Logger s_logger = Logger.getLogger(ElastistorPrimaryDataStoreLifeCycle.class);
@Inject

DateraPrimaryDataStoreLifeCycle.java

@@ -20,11 +20,11 @@ package org.apache.cloudstack.storage.datastore.lifecycle;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.capacity.CapacityManager;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
@@ -43,10 +43,10 @@ import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.util.DateraUtil;
@@ -58,7 +58,7 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Map;
public class DateraPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle {
public class DateraPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
private static final Logger s_logger = Logger.getLogger(DateraPrimaryDataStoreLifeCycle.class);
@Inject
@@ -395,6 +395,15 @@ public class DateraPrimaryDataStoreLifeCycl
dataStoreHelper.disable(dataStore);
}
@Override
public void changeStoragePoolScopeToZone(DataStore store, ClusterScope clusterScope, HypervisorType hypervisorType) {
/*
* We need to attach all KVM and VMware hosts in the zone.
* So pass hypervisorType as null.
*/
super.changeStoragePoolScopeToZone(store, clusterScope, null);
}
private HypervisorType getHypervisorTypeForCluster(long clusterId) {
ClusterVO cluster = _clusterDao.findById(clusterId);

CloudStackPrimaryDataStoreLifeCycleImpl.java

@@ -72,7 +72,7 @@ import java.util.List;
import java.util.Map;
import java.util.UUID;
public class CloudStackPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
private static final Logger s_logger = Logger.getLogger(CloudStackPrimaryDataStoreLifeCycleImpl.class);
@Inject
protected ResourceManager _resourceMgr;

LinstorPrimaryDataStoreLifeCycleImpl.java

@@ -53,7 +53,7 @@ import org.apache.cloudstack.storage.datastore.util.LinstorUtil;
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
import org.apache.log4j.Logger;
public class LinstorPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
public class LinstorPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
private static final Logger s_logger = Logger.getLogger(LinstorPrimaryDataStoreLifeCycleImpl.class);
@Inject

NexentaPrimaryDataStoreLifeCycle.java

@@ -30,6 +30,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl;
import org.apache.cloudstack.storage.datastore.util.NexentaUtil;
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
import org.apache.log4j.Logger;
@@ -45,6 +46,7 @@ import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolAutomation;
public class NexentaPrimaryDataStoreLifeCycle
extends BasePrimaryDataStoreLifeCycleImpl
implements PrimaryDataStoreLifeCycle {
private static final Logger logger =
Logger.getLogger(NexentaPrimaryDataStoreLifeCycle.class);
@@ -177,6 +179,15 @@ public class NexentaPrimaryDataStoreLifeCycle
dataStoreHelper.disable(dataStore);
}
@Override
public void changeStoragePoolScopeToZone(DataStore store, ClusterScope clusterScope, Hypervisor.HypervisorType hypervisorType) {
/*
* We need to attach all KVM and VMware hosts in the zone.
* So pass hypervisorType as null.
*/
super.changeStoragePoolScopeToZone(store, clusterScope, null);
}
@Override
public boolean deleteDataStore(DataStore store) {
return dataStoreHelper.deletePrimaryDataStore(store);

SamplePrimaryDataStoreLifeCycleImpl.java

@@ -146,4 +146,12 @@ public class SamplePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLife
@Override
public void disableStoragePool(DataStore store) {
}
@Override
public void changeStoragePoolScopeToZone(DataStore store, ClusterScope clusterScope, HypervisorType hypervisorType) {
}
@Override
public void changeStoragePoolScopeToCluster(DataStore store, ClusterScope clusterScope, HypervisorType hypervisorType) {
}
}

ScaleIOPrimaryDataStoreLifeCycle.java

@@ -74,7 +74,7 @@ import com.cloud.utils.UriUtils;
import com.cloud.utils.crypt.DBEncryptionUtil;
import com.cloud.utils.exception.CloudRuntimeException;
public class ScaleIOPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle {
public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
private static final Logger LOGGER = Logger.getLogger(ScaleIOPrimaryDataStoreLifeCycle.class);
@Inject

SolidFirePrimaryDataStoreLifeCycle.java

@@ -63,7 +63,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
import com.google.common.base.Preconditions;
public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle {
public class SolidFirePrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
private static final Logger s_logger = Logger.getLogger(SolidFirePrimaryDataStoreLifeCycle.class);
@Inject private CapacityManager _capacityMgr;
@@ -387,4 +387,13 @@ public class SolidFirePrimaryDataStoreLifeC
public void disableStoragePool(DataStore dataStore) {
_dataStoreHelper.disable(dataStore);
}
@Override
public void changeStoragePoolScopeToZone(DataStore store, ClusterScope clusterScope, HypervisorType hypervisorType) {
/*
* We need to attach all KVM and VMware hosts in the zone.
* So pass hypervisorType as null.
*/
super.changeStoragePoolScopeToZone(store, clusterScope, null);
}
}

SolidFireSharedPrimaryDataStoreLifeCycle.java

@@ -72,7 +72,7 @@ import com.cloud.user.dao.AccountDao;
import com.cloud.utils.db.GlobalLock;
import com.cloud.utils.exception.CloudRuntimeException;
public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle {
public class SolidFireSharedPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
private static final Logger LOGGER = Logger.getLogger(SolidFireSharedPrimaryDataStoreLifeCycle.class);
@Inject private AccountDao accountDao;

StorPoolPrimaryDataStoreLifeCycle.java

@@ -60,7 +60,7 @@ import com.cloud.storage.dao.VMTemplateDetailsDao;
import com.cloud.storage.dao.VMTemplatePoolDao;
import com.cloud.utils.exception.CloudRuntimeException;
public class StorPoolPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle {
public class StorPoolPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
private static final Logger log = Logger.getLogger(StorPoolPrimaryDataStoreLifeCycle.class);
@Inject

QueryManagerImpl.java

@@ -100,6 +100,7 @@ import org.apache.cloudstack.api.command.admin.storage.ListStorageTagsCmd;
import org.apache.cloudstack.api.command.admin.storage.heuristics.ListSecondaryStorageSelectorsCmd;
import org.apache.cloudstack.api.command.admin.template.ListTemplatesCmdByAdmin;
import org.apache.cloudstack.api.command.admin.user.ListUsersCmd;
import org.apache.cloudstack.api.command.admin.vm.ListAffectedVmsForStorageScopeChangeCmd;
import org.apache.cloudstack.api.command.admin.zone.ListZonesCmdByAdmin;
import org.apache.cloudstack.api.command.user.account.ListAccountsCmd;
import org.apache.cloudstack.api.command.user.account.ListProjectAccountsCmd;
@@ -155,6 +156,7 @@ import org.apache.cloudstack.api.response.StorageTagResponse;
import org.apache.cloudstack.api.response.TemplateResponse;
import org.apache.cloudstack.api.response.UserResponse;
import org.apache.cloudstack.api.response.UserVmResponse;
import org.apache.cloudstack.api.response.VirtualMachineResponse;
import org.apache.cloudstack.api.response.VolumeResponse;
import org.apache.cloudstack.api.response.ZoneResponse;
import org.apache.cloudstack.backup.BackupOfferingVO;
@@ -243,8 +245,10 @@ import com.cloud.api.query.vo.UserVmJoinVO;
import com.cloud.api.query.vo.VolumeJoinVO;
import com.cloud.cluster.ManagementServerHostVO;
import com.cloud.cluster.dao.ManagementServerHostDao;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenter;
import com.cloud.dc.DedicatedResourceVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DedicatedResourceDao;
import com.cloud.domain.Domain;
import com.cloud.domain.DomainVO;
@@ -593,6 +597,10 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
@Inject
private StoragePoolHostDao storagePoolHostDao;
@Inject
private ClusterDao clusterDao;
private SearchCriteria<ServiceOfferingJoinVO> getMinimumCpuServiceOfferingJoinSearchCriteria(int cpu) {
SearchCriteria<ServiceOfferingJoinVO> sc = _srvOfferingJoinDao.createSearchCriteria();
SearchCriteria<ServiceOfferingJoinVO> sc1 = _srvOfferingJoinDao.createSearchCriteria();
@@ -1147,6 +1155,58 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
return response;
}
@Override
public ListResponse<VirtualMachineResponse> listAffectedVmsForStorageScopeChange(ListAffectedVmsForStorageScopeChangeCmd cmd) {
Long poolId = cmd.getStorageId();
StoragePoolVO pool = storagePoolDao.findById(poolId);
if (pool == null) {
throw new IllegalArgumentException("Unable to find storage pool with ID: " + poolId);
}
ListResponse<VirtualMachineResponse> response = new ListResponse<>();
List<VirtualMachineResponse> responsesList = new ArrayList<>();
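// Only a zone-wide pool can be narrowed to a cluster; any other scope yields an empty result.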
if (pool.getScope() != ScopeType.ZONE) {
response.setResponses(responsesList, 0);
return response;
}
Pair<List<VMInstanceVO>, Integer> vms = _vmInstanceDao.listByVmsNotInClusterUsingPool(cmd.getClusterIdForScopeChange(), poolId);
for (VMInstanceVO vm : vms.first()) {
VirtualMachineResponse resp = new VirtualMachineResponse();
resp.setObjectName(VirtualMachine.class.getSimpleName().toLowerCase());
resp.setId(vm.getUuid());
resp.setVmType(vm.getType().toString());
UserVmJoinVO userVM = null;
if (!vm.getType().isUsedBySystem()) {
userVM = _userVmJoinDao.findById(vm.getId());
}
if (userVM != null) {
if (userVM.getDisplayName() != null) {
resp.setVmName(userVM.getDisplayName());
} else {
resp.setVmName(userVM.getName());
}
} else {
resp.setVmName(vm.getInstanceName());
}
HostVO host = hostDao.findById(vm.getHostId());
if (host != null) {
resp.setHostId(host.getUuid());
resp.setHostName(host.getName());
ClusterVO cluster = clusterDao.findById(host.getClusterId());
if (cluster != null) {
resp.setClusterId(cluster.getUuid());
resp.setClusterName(cluster.getName());
}
}
responsesList.add(resp);
}
response.setResponses(responsesList, vms.second());
return response;
}
private Object getObjectPossibleMethodValue(Object obj, String methodName) {
Object result = null;

ResourceManagerImpl.java

@@ -3426,6 +3426,26 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
return _hostGpuGroupsDao.customSearch(sc, searchFilter);
}
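// New helpers: all Up hosts of a zone that sit outside a given cluster, filtered by one or more hypervisor types.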
@Override
public List<HostVO> listAllHostsInOneZoneNotInClusterByHypervisor(final HypervisorType type, final long dcId, final long clusterId) {
final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
sc.and(sc.entity().getHypervisorType(), Op.EQ, type);
sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId);
sc.and(sc.entity().getClusterId(), Op.NEQ, clusterId);
sc.and(sc.entity().getStatus(), Op.EQ, Status.Up);
return sc.list();
}
@Override
public List<HostVO> listAllHostsInOneZoneNotInClusterByHypervisors(List<HypervisorType> types, final long dcId, final long clusterId) {
final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
sc.and(sc.entity().getHypervisorType(), Op.IN, types);
sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId);
sc.and(sc.entity().getClusterId(), Op.NEQ, clusterId);
sc.and(sc.entity().getStatus(), Op.EQ, Status.Up);
return sc.list();
}
@Override
public boolean isGPUDeviceAvailable(final long hostId, final String groupName, final String vgpuType) {
if(!listAvailableGPUDevice(hostId, groupName, vgpuType).isEmpty()) {

View File

@ -211,6 +211,7 @@ import org.apache.cloudstack.api.command.admin.storage.AddImageStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.AddImageStoreS3CMD;
import org.apache.cloudstack.api.command.admin.storage.AddObjectStoragePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd;
import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd;
import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd;
@ -522,6 +523,7 @@ import org.apache.cloudstack.api.command.user.vm.AddIpToVmNicCmd;
import org.apache.cloudstack.api.command.user.vm.AddNicToVMCmd;
import org.apache.cloudstack.api.command.user.vm.DeployVMCmd;
import org.apache.cloudstack.api.command.user.vm.DestroyVMCmd;
import org.apache.cloudstack.api.command.admin.vm.ListAffectedVmsForStorageScopeChangeCmd;
import org.apache.cloudstack.api.command.user.vm.GetVMPasswordCmd;
import org.apache.cloudstack.api.command.user.vm.ListNicsCmd;
import org.apache.cloudstack.api.command.user.vm.ListVMsCmd;
@ -3481,6 +3483,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
cmdList.add(UpgradeRouterCmd.class);
cmdList.add(AddSwiftCmd.class);
cmdList.add(CancelPrimaryStorageMaintenanceCmd.class);
cmdList.add(ChangeStoragePoolScopeCmd.class);
cmdList.add(CreateStoragePoolCmd.class);
cmdList.add(DeletePoolCmd.class);
cmdList.add(ListSwiftsCmd.class);
@ -3917,6 +3920,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
cmdList.add(CreateSecondaryStorageSelectorCmd.class);
cmdList.add(UpdateSecondaryStorageSelectorCmd.class);
cmdList.add(RemoveSecondaryStorageSelectorCmd.class);
cmdList.add(ListAffectedVmsForStorageScopeChangeCmd.class);
// Out-of-band management APIs for admins

View File

@ -54,6 +54,7 @@ import org.apache.cloudstack.annotation.AnnotationService;
import org.apache.cloudstack.annotation.dao.AnnotationDao;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd;
import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd;
import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd;
@ -257,6 +258,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.DiskProfile;
import com.cloud.vm.UserVmManager;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachine.State;
import com.cloud.vm.dao.VMInstanceDao;
import com.google.common.collect.Sets;
@ -412,6 +414,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
private final Map<String, HypervisorHostListener> hostListeners = new HashMap<String, HypervisorHostListener>();
private final Set<HypervisorType> zoneWidePoolSupportedHypervisorTypes = Sets.newHashSet(HypervisorType.KVM, HypervisorType.VMware,
HypervisorType.Hyperv, HypervisorType.LXC, HypervisorType.Any, HypervisorType.Simulator);
private static final String NFS_MOUNT_OPTIONS_INCORRECT = "An incorrect mount option was specified";
public boolean share(VMInstanceVO vm, List<VolumeVO> vols, HostVO host, boolean cancelPreviousShare) throws StorageUnavailableException {
@ -967,9 +972,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
throw new InvalidParameterValueException("Missing parameter hypervisor. Hypervisor type is required to create zone wide primary storage.");
}
Set<HypervisorType> supportedHypervisorTypes = Sets.newHashSet(HypervisorType.KVM, HypervisorType.VMware,
HypervisorType.Hyperv, HypervisorType.LXC, HypervisorType.Any, HypervisorType.Simulator);
if (!supportedHypervisorTypes.contains(hypervisorType)) {
if (!zoneWidePoolSupportedHypervisorTypes.contains(hypervisorType)) {
throw new InvalidParameterValueException("Zone wide storage pool is not supported for hypervisor type " + hypervisor);
}
} else {
@ -1249,6 +1252,115 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
}
private void changeStoragePoolScopeToZone(StoragePoolVO primaryStorage) {
/*
 * For cluster-wide primary storage the hypervisor type might not be set
 * on the pool itself, so read it from the owning cluster.
 */
Long clusterId = primaryStorage.getClusterId();
ClusterVO clusterVO = _clusterDao.findById(clusterId);
HypervisorType hypervisorType = clusterVO.getHypervisorType();
if (!zoneWidePoolSupportedHypervisorTypes.contains(hypervisorType)) {
throw new InvalidParameterValueException("Primary storage scope change to Zone is not supported for hypervisor type " + hypervisorType);
}
DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName());
PrimaryDataStoreLifeCycle lifeCycle = (PrimaryDataStoreLifeCycle) storeProvider.getDataStoreLifeCycle();
DataStore primaryStore = _dataStoreMgr.getPrimaryDataStore(primaryStorage.getId());
ClusterScope clusterScope = new ClusterScope(primaryStorage.getClusterId(), null, primaryStorage.getDataCenterId());
lifeCycle.changeStoragePoolScopeToZone(primaryStore, clusterScope, hypervisorType);
}
private void changeStoragePoolScopeToCluster(StoragePoolVO primaryStorage, Long clusterId) {
if (clusterId == null) {
throw new InvalidParameterValueException("Cluster ID not provided");
}
ClusterVO clusterVO = _clusterDao.findById(clusterId);
if (clusterVO == null) {
throw new InvalidParameterValueException("Unable to find cluster by id " + clusterId);
}
if (clusterVO.getAllocationState().equals(Grouping.AllocationState.Disabled)) {
throw new PermissionDeniedException("Cannot perform this operation, Cluster is currently disabled: " + clusterId);
}
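// A VM in any of these states on a host outside the target cluster still has
// volumes attached from this pool and would lose access after the change.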
List<VirtualMachine.State> states = Arrays.asList(State.Starting, State.Running, State.Stopping, State.Migrating, State.Restoring);
Long id = primaryStorage.getId();
Pair<List<VMInstanceVO>, Integer> vmsNotInClusterUsingPool = _vmInstanceDao.listByVmsNotInClusterUsingPool(clusterId, id);
if (vmsNotInClusterUsingPool.second() != 0) {
throw new CloudRuntimeException(String.format("Cannot change scope of the storage pool [%s] to cluster [%s] " +
"as there are %s VMs with volumes in this pool that are running on other clusters. " +
"All such User VMs must be stopped and System VMs must be destroyed before proceeding. " +
"Please use the API listAffectedVmsForStorageScopeChange to get the list.",
primaryStorage.getName(), clusterVO.getName(), vmsNotInClusterUsingPool.second()));
}
DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName());
PrimaryDataStoreLifeCycle lifeCycle = (PrimaryDataStoreLifeCycle) storeProvider.getDataStoreLifeCycle();
DataStore primaryStore = _dataStoreMgr.getPrimaryDataStore(id);
ClusterScope clusterScope = new ClusterScope(clusterId, clusterVO.getPodId(), primaryStorage.getDataCenterId());
lifeCycle.changeStoragePoolScopeToCluster(primaryStore, clusterScope, primaryStorage.getHypervisor());
}
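/*
 * Typical admin workflow wired up by this change (illustrative sketch; the
 * parameter names for listAffectedVmsForStorageScopeChange follow the cmd
 * fields and may differ in the published API doc):
 *   1. updateStoragePool id=<pool> enabled=false
 *   2. listAffectedVmsForStorageScopeChange storageid=<pool> clusterid=<cluster>  (zone -> cluster only)
 *   3. changeStoragePoolScope id=<pool> scope=CLUSTER clusterid=<cluster>  (or scope=ZONE)
 *   4. updateStoragePool id=<pool> enabled=true
 */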
@Override
@ActionEvent(eventType = EventTypes.EVENT_CHANGE_STORAGE_POOL_SCOPE, eventDescription = "changing storage pool scope")
public void changeStoragePoolScope(ChangeStoragePoolScopeCmd cmd) throws IllegalArgumentException, InvalidParameterValueException, PermissionDeniedException {
Long id = cmd.getId();
Long accountId = cmd.getEntityOwnerId();
if (!_accountMgr.isRootAdmin(accountId)) {
throw new PermissionDeniedException("Only root admin can perform this operation");
}
ScopeType newScope = EnumUtils.getEnumIgnoreCase(ScopeType.class, cmd.getScope());
if (newScope != ScopeType.ZONE && newScope != ScopeType.CLUSTER) {
throw new InvalidParameterValueException("Invalid scope " + cmd.getScope() + "for Primary storage");
}
StoragePoolVO primaryStorage = _storagePoolDao.findById(id);
if (primaryStorage == null) {
throw new IllegalArgumentException("Unable to find storage pool with ID: " + id);
}
String eventDetails = String.format("Storage pool Id: %s to %s", primaryStorage.getUuid(), newScope);
CallContext.current().setEventDetails(eventDetails);
ScopeType currentScope = primaryStorage.getScope();
if (currentScope.equals(newScope)) {
throw new InvalidParameterValueException("New scope must be different than the current scope");
}
if (currentScope != ScopeType.ZONE && currentScope != ScopeType.CLUSTER) {
throw new InvalidParameterValueException("This operation is supported only for Primary storages having scope "
+ ScopeType.CLUSTER + " or " + ScopeType.ZONE);
}
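// Requiring the Disabled state keeps allocators from placing new volumes on
// the pool while its host connections are being rewired.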
if (!primaryStorage.getStatus().equals(StoragePoolStatus.Disabled)) {
throw new InvalidParameterValueException("Scope of the Primary storage with id "
+ primaryStorage.getUuid() +
" cannot be changed, as it is not in the Disabled state");
}
Long zoneId = primaryStorage.getDataCenterId();
DataCenterVO zone = _dcDao.findById(zoneId);
if (zone == null) {
throw new InvalidParameterValueException("Unable to find zone by id " + zoneId);
}
if (zone.getAllocationState().equals(Grouping.AllocationState.Disabled)) {
throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId);
}
if (newScope.equals(ScopeType.ZONE)) {
changeStoragePoolScopeToZone(primaryStorage);
} else {
changeStoragePoolScopeToCluster(primaryStorage, cmd.getClusterId());
}
}
@Override
public void removeStoragePoolFromCluster(long hostId, String iScsiName, StoragePool storagePool) {
final Map<String, String> details = new HashMap<>();

View File

@ -18,18 +18,25 @@
package com.cloud.api.query;
import com.cloud.api.query.dao.TemplateJoinDao;
import com.cloud.api.query.dao.UserVmJoinDao;
import com.cloud.api.query.vo.EventJoinVO;
import com.cloud.api.query.vo.TemplateJoinVO;
import com.cloud.api.query.vo.UserVmJoinVO;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.event.EventVO;
import com.cloud.event.dao.EventDao;
import com.cloud.event.dao.EventJoinDao;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.PermissionDeniedException;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.network.Network;
import com.cloud.network.VNF;
import com.cloud.network.dao.NetworkVO;
import com.cloud.server.ResourceTag;
import com.cloud.storage.BucketVO;
import com.cloud.storage.ScopeType;
import com.cloud.storage.dao.BucketDao;
import com.cloud.user.Account;
import com.cloud.user.AccountManager;
@ -41,10 +48,14 @@ import com.cloud.utils.db.EntityManager;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.dao.VMInstanceDao;
import org.apache.cloudstack.acl.SecurityChecker;
import org.apache.cloudstack.api.ApiCommandResourceType;
import org.apache.cloudstack.api.command.admin.storage.ListObjectStoragePoolsCmd;
import org.apache.cloudstack.api.command.admin.vm.ListAffectedVmsForStorageScopeChangeCmd;
import org.apache.cloudstack.api.command.user.bucket.ListBucketsCmd;
import org.apache.cloudstack.api.command.user.event.ListEventsCmd;
import org.apache.cloudstack.api.command.user.resource.ListDetailOptionsCmd;
@ -52,9 +63,12 @@ import org.apache.cloudstack.api.response.DetailOptionsResponse;
import org.apache.cloudstack.api.response.EventResponse;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.api.response.ObjectStoreResponse;
import org.apache.cloudstack.api.response.VirtualMachineResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.storage.datastore.db.ObjectStoreDao;
import org.apache.cloudstack.storage.datastore.db.ObjectStoreVO;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@ -65,6 +79,7 @@ import org.mockito.MockedStatic;
import org.mockito.Mockito;
import org.mockito.Spy;
import org.mockito.junit.MockitoJUnitRunner;
import org.springframework.test.util.ReflectionTestUtils;
import java.util.ArrayList;
import java.util.Arrays;
@ -112,9 +127,24 @@ public class QueryManagerImplTest {
@Mock
ObjectStoreDao objectStoreDao;
@Mock
VMInstanceDao vmInstanceDao;
@Mock
PrimaryDataStoreDao storagePoolDao;
@Mock
HostDao hostDao;
@Mock
ClusterDao clusterDao;
@Mock
BucketDao bucketDao;
@Mock
UserVmJoinDao userVmJoinDao;
private AccountVO account;
private UserVO user;
@ -352,4 +382,46 @@ public class QueryManagerImplTest {
when(bucketDao.searchAndCount(any(), any())).thenReturn(new Pair<>(buckets, 2));
queryManagerImplSpy.searchForBuckets(listBucketsCmd);
}
@Test
public void testListAffectedVmsForScopeChange() {
Long clusterId = 1L;
Long poolId = 2L;
Long hostId = 3L;
Long vmId = 4L;
String vmName = "VM1";
ListAffectedVmsForStorageScopeChangeCmd cmd = new ListAffectedVmsForStorageScopeChangeCmd();
ReflectionTestUtils.setField(cmd, "clusterIdForScopeChange", clusterId);
ReflectionTestUtils.setField(cmd, "storageId", poolId);
StoragePoolVO pool = Mockito.mock(StoragePoolVO.class);
Mockito.when(pool.getScope()).thenReturn(ScopeType.CLUSTER);
Mockito.when(storagePoolDao.findById(poolId)).thenReturn(pool);
ListResponse<VirtualMachineResponse> response = queryManager.listAffectedVmsForStorageScopeChange(cmd);
Assert.assertEquals(0, response.getResponses().size());
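// A CLUSTER-scoped pool is not eligible for this listing, so the response is
// empty; re-stub the scope to ZONE below and expect a populated response.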
VMInstanceVO instance = Mockito.mock(VMInstanceVO.class);
UserVmJoinVO userVM = Mockito.mock(UserVmJoinVO.class);
String instanceUuid = String.valueOf(UUID.randomUUID());
Pair<List<VMInstanceVO>, Integer> vms = new Pair<>(List.of(instance), 1);
HostVO host = Mockito.mock(HostVO.class);
ClusterVO cluster = Mockito.mock(ClusterVO.class);
Mockito.when(pool.getScope()).thenReturn(ScopeType.ZONE);
Mockito.when(instance.getUuid()).thenReturn(instanceUuid);
Mockito.when(instance.getType()).thenReturn(VirtualMachine.Type.Instance);
Mockito.when(instance.getHostId()).thenReturn(hostId);
Mockito.when(instance.getId()).thenReturn(vmId);
Mockito.when(userVM.getDisplayName()).thenReturn(vmName);
Mockito.when(vmInstanceDao.listByVmsNotInClusterUsingPool(clusterId, poolId)).thenReturn(vms);
Mockito.when(userVmJoinDao.findById(vmId)).thenReturn(userVM);
Mockito.when(hostDao.findById(hostId)).thenReturn(host);
Mockito.when(host.getClusterId()).thenReturn(clusterId);
Mockito.when(clusterDao.findById(clusterId)).thenReturn(cluster);
response = queryManager.listAffectedVmsForStorageScopeChange(cmd);
Assert.assertEquals(instanceUuid, response.getResponses().get(0).getId());
Assert.assertEquals(vmName, response.getResponses().get(0).getName());
}
}

View File

@ -431,6 +431,17 @@ public class MockResourceManagerImpl extends ManagerBase implements ResourceMana
return null;
}
@Override
public List<HostVO> listAllHostsInOneZoneNotInClusterByHypervisor(HypervisorType type, long dcId, long clusterId) {
return null;
}
@Override
public List<HostVO> listAllHostsInOneZoneNotInClusterByHypervisors(List<HypervisorType> types, long dcId, long clusterId) {
// TODO Auto-generated method stub
return null;
}
/* (non-Javadoc)
* @see com.cloud.resource.ResourceManager#listAvailHypervisorInZone(java.lang.Long, java.lang.Long)
*/

View File

@ -16,17 +16,22 @@
// under the License.
package com.cloud.storage;
import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenter;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.exception.ConnectionException;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.PermissionDeniedException;
import com.cloud.host.Host;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.AccountManager;
import com.cloud.user.AccountManagerImpl;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.dao.VMInstanceDao;
@ -46,6 +51,7 @@ import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.Spy;
import org.mockito.junit.MockitoJUnitRunner;
import org.springframework.test.util.ReflectionTestUtils;
import java.util.ArrayList;
import java.util.HashMap;
@ -67,10 +73,16 @@ public class StorageManagerImplTest {
@Mock
DataCenterDao dataCenterDao;
@Mock
AccountManager accountManager;
AccountManagerImpl accountMgr;
@Mock
StoragePoolDetailsDao storagePoolDetailsDao;
@Mock
ClusterDao clusterDao;
@Mock
PrimaryDataStoreDao storagePoolDao;
@Spy
@InjectMocks
private StorageManagerImpl storageManagerImpl;
@ -260,11 +272,74 @@ public class StorageManagerImplTest {
.update(StorageManager.DataStoreDownloadFollowRedirects.key(),StorageManager.DataStoreDownloadFollowRedirects.defaultValue());
}
private ChangeStoragePoolScopeCmd mockChangeStoragePoolScopeCmd(String newScope) {
ChangeStoragePoolScopeCmd cmd = new ChangeStoragePoolScopeCmd();
ReflectionTestUtils.setField(cmd, "id", 1L);
ReflectionTestUtils.setField(cmd, "clusterId", 1L);
ReflectionTestUtils.setField(cmd, "scope", newScope);
return cmd;
}
private StoragePoolVO mockStoragePoolVOForChangeStoragePoolScope(ScopeType currentScope, StoragePoolStatus status) {
StoragePoolVO primaryStorage = new StoragePoolVO();
primaryStorage.setId(1L);
primaryStorage.setDataCenterId(1L);
primaryStorage.setClusterId(1L);
primaryStorage.setScope(currentScope);
primaryStorage.setStatus(status);
return primaryStorage;
}
private void prepareTestChangeStoragePoolScope(ScopeType currentScope, StoragePoolStatus status) {
final DataCenterVO zone = new DataCenterVO(1L, null, null, null, null, null, null, null, null, null, DataCenter.NetworkType.Advanced, null, null);
StoragePoolVO primaryStorage = mockStoragePoolVOForChangeStoragePoolScope(currentScope, status);
Mockito.when(accountMgr.isRootAdmin(Mockito.any())).thenReturn(true);
Mockito.when(dataCenterDao.findById(1L)).thenReturn(zone);
Mockito.when(storagePoolDao.findById(1L)).thenReturn(primaryStorage);
}
@Test(expected = InvalidParameterValueException.class)
public void testChangeStoragePoolScopeNotDisabledException() {
prepareTestChangeStoragePoolScope(ScopeType.CLUSTER, StoragePoolStatus.Initialized);
ChangeStoragePoolScopeCmd cmd = mockChangeStoragePooolScopeCmd("ZONE");
storageManagerImpl.changeStoragePoolScope(cmd);
}
@Test(expected = InvalidParameterValueException.class)
public void testChangeStoragePoolScopeToZoneHypervisorNotSupported() {
prepareTestChangeStoragePoolScope(ScopeType.CLUSTER, StoragePoolStatus.Disabled);
final ClusterVO cluster = new ClusterVO();
cluster.setHypervisorType(String.valueOf(HypervisorType.XenServer));
Mockito.when(clusterDao.findById(1L)).thenReturn(cluster);
ChangeStoragePoolScopeCmd cmd = mockChangeStoragePooolScopeCmd("ZONE");
storageManagerImpl.changeStoragePoolScope(cmd);
}
@Test(expected = CloudRuntimeException.class)
public void testChangeStoragePoolScopeToClusterVolumesPresentException() {
prepareTestChangeStoragePoolScope(ScopeType.ZONE, StoragePoolStatus.Disabled);
final ClusterVO cluster = new ClusterVO();
Mockito.when(clusterDao.findById(1L)).thenReturn(cluster);
VMInstanceVO instance = Mockito.mock(VMInstanceVO.class);
Pair<List<VMInstanceVO>, Integer> vms = new Pair<>(List.of(instance), 1);
Mockito.when(vmInstanceDao.listByVmsNotInClusterUsingPool(1L, 1L)).thenReturn(vms);
ChangeStoragePoolScopeCmd cmd = mockChangeStoragePooolScopeCmd("CLUSTER");
storageManagerImpl.changeStoragePoolScope(cmd);
}
@Test
public void testCheckNFSMountOptionsForCreateNoNFSMountOptions() {
Map<String, String> details = new HashMap<>();
try {
storageManagerImpl.checkNFSMountOptionsForCreate(details, Hypervisor.HypervisorType.XenServer, "");
storageManagerImpl.checkNFSMountOptionsForCreate(details, HypervisorType.XenServer, "");
} catch (Exception e) {
Assert.fail();
}
@ -275,8 +350,8 @@ public class StorageManagerImplTest {
Map<String, String> details = new HashMap<>();
details.put(ApiConstants.NFS_MOUNT_OPTIONS, "vers=4.1");
InvalidParameterValueException exception = Assert.assertThrows(InvalidParameterValueException.class,
() -> storageManagerImpl.checkNFSMountOptionsForCreate(details, Hypervisor.HypervisorType.XenServer, ""));
Assert.assertEquals(exception.getMessage(), "NFS options can not be set for the hypervisor type " + Hypervisor.HypervisorType.XenServer);
() -> storageManagerImpl.checkNFSMountOptionsForCreate(details, HypervisorType.XenServer, ""));
Assert.assertEquals(exception.getMessage(), "NFS options can not be set for the hypervisor type " + HypervisorType.XenServer);
}
@Test
@ -284,7 +359,7 @@ public class StorageManagerImplTest {
Map<String, String> details = new HashMap<>();
details.put(ApiConstants.NFS_MOUNT_OPTIONS, "vers=4.1");
InvalidParameterValueException exception = Assert.assertThrows(InvalidParameterValueException.class,
() -> storageManagerImpl.checkNFSMountOptionsForCreate(details, Hypervisor.HypervisorType.KVM, ""));
() -> storageManagerImpl.checkNFSMountOptionsForCreate(details, HypervisorType.KVM, ""));
Assert.assertEquals(exception.getMessage(), "NFS options can only be set on pool type " + Storage.StoragePoolType.NetworkFilesystem);
}
@ -306,7 +381,7 @@ public class StorageManagerImplTest {
StoragePoolVO pool = new StoragePoolVO();
Long accountId = 1L;
details.put(ApiConstants.NFS_MOUNT_OPTIONS, "vers=4.1");
Mockito.when(accountManager.isRootAdmin(accountId)).thenReturn(false);
Mockito.when(accountMgr.isRootAdmin(accountId)).thenReturn(false);
PermissionDeniedException exception = Assert.assertThrows(PermissionDeniedException.class,
() -> storageManagerImpl.checkNFSMountOptionsForUpdate(details, pool, accountId));
Assert.assertEquals(exception.getMessage(), "Only root admin can modify nfs options");
@ -318,11 +393,11 @@ public class StorageManagerImplTest {
StoragePoolVO pool = new StoragePoolVO();
Long accountId = 1L;
details.put(ApiConstants.NFS_MOUNT_OPTIONS, "vers=4.1");
Mockito.when(accountManager.isRootAdmin(accountId)).thenReturn(true);
pool.setHypervisor(Hypervisor.HypervisorType.XenServer);
Mockito.when(accountMgr.isRootAdmin(accountId)).thenReturn(true);
pool.setHypervisor(HypervisorType.XenServer);
InvalidParameterValueException exception = Assert.assertThrows(InvalidParameterValueException.class,
() -> storageManagerImpl.checkNFSMountOptionsForUpdate(details, pool, accountId));
Assert.assertEquals(exception.getMessage(), "NFS options can only be set for the hypervisor type " + Hypervisor.HypervisorType.KVM);
Assert.assertEquals(exception.getMessage(), "NFS options can only be set for the hypervisor type " + HypervisorType.KVM);
}
@Test
@ -331,8 +406,8 @@ public class StorageManagerImplTest {
StoragePoolVO pool = new StoragePoolVO();
Long accountId = 1L;
details.put(ApiConstants.NFS_MOUNT_OPTIONS, "vers=4.1");
Mockito.when(accountManager.isRootAdmin(accountId)).thenReturn(true);
pool.setHypervisor(Hypervisor.HypervisorType.KVM);
Mockito.when(accountMgr.isRootAdmin(accountId)).thenReturn(true);
pool.setHypervisor(HypervisorType.KVM);
pool.setPoolType(Storage.StoragePoolType.FiberChannel);
InvalidParameterValueException exception = Assert.assertThrows(InvalidParameterValueException.class,
() -> storageManagerImpl.checkNFSMountOptionsForUpdate(details, pool, accountId));
@ -345,8 +420,8 @@ public class StorageManagerImplTest {
StoragePoolVO pool = new StoragePoolVO();
Long accountId = 1L;
details.put(ApiConstants.NFS_MOUNT_OPTIONS, "vers=4.1");
Mockito.when(accountManager.isRootAdmin(accountId)).thenReturn(true);
pool.setHypervisor(Hypervisor.HypervisorType.KVM);
Mockito.when(accountMgr.isRootAdmin(accountId)).thenReturn(true);
pool.setHypervisor(HypervisorType.KVM);
pool.setPoolType(Storage.StoragePoolType.NetworkFilesystem);
pool.setStatus(StoragePoolStatus.Up);
InvalidParameterValueException exception = Assert.assertThrows(InvalidParameterValueException.class,
@ -359,7 +434,7 @@ public class StorageManagerImplTest {
String nfsMountOpts = "vers=4.1, nconnect=4,vers=4.2";
Map<String, String> details = new HashMap<>();
details.put(ApiConstants.NFS_MOUNT_OPTIONS, nfsMountOpts);
storageManagerImpl.checkNFSMountOptionsForCreate(details, Hypervisor.HypervisorType.KVM, "nfs");
storageManagerImpl.checkNFSMountOptionsForCreate(details, HypervisorType.KVM, "nfs");
}
@Test(expected = InvalidParameterValueException.class)
@ -368,11 +443,11 @@ public class StorageManagerImplTest {
Map<String, String> details = new HashMap<>();
details.put(ApiConstants.NFS_MOUNT_OPTIONS, nfsMountOpts);
StoragePoolVO pool = new StoragePoolVO();
pool.setHypervisor(Hypervisor.HypervisorType.KVM);
pool.setHypervisor(HypervisorType.KVM);
pool.setPoolType(Storage.StoragePoolType.NetworkFilesystem);
pool.setStatus(StoragePoolStatus.Maintenance);
Long accountId = 1L;
Mockito.when(accountManager.isRootAdmin(accountId)).thenReturn(true);
Mockito.when(accountMgr.isRootAdmin(accountId)).thenReturn(true);
storageManagerImpl.checkNFSMountOptionsForUpdate(details, pool, accountId);
}

View File

@ -0,0 +1,176 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" BVT tests for Primary Storage
"""
# Import Local Modules
from marvin.cloudstackTestCase import *
from marvin.lib.base import (Host, StoragePool, Cluster, updateStoragePool, changeStoragePoolScope)
from marvin.lib.common import (get_zone, get_pod, list_clusters)
from marvin.lib.utils import cleanup_resources
from nose.plugins.attrib import attr
class TestPrimaryStorageScope(cloudstackTestCase):
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.services = self.testClient.getParsedTestDataConfig()
self._cleanup = []
self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())
self.pod = get_pod(self.apiclient, self.zone.id)
self.debug("here")
self.debug(self.services)
self.cluster1 = list_clusters(self.apiclient)[0]
self.debug("here1")
self.debug(self.cluster1)
self.cluster = {
'clustername': 'C0_testScope',
'clustertype': 'CloudManaged'
}
self.cluster2 = Cluster.create(self.apiclient,
self.cluster,
zoneid=self.zone.id,
podid=self.pod.id,
hypervisor=self.cluster1.hypervisortype
)
self._cleanup.append(self.cluster2)
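# A second, empty cluster gives the zone-wide pool a valid CLUSTER-scope
# target with no VMs running outside it.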
self.storage = StoragePool.create(self.apiclient,
self.services["nfs"],
scope='ZONE',
zoneid=self.zone.id,
hypervisor=self.cluster1.hypervisortype
)
self._cleanup.append(self.storage)
self.debug("Created storage pool %s in zone scope", self.storage.id)
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true")
def test_01_primary_storage_scope_change(self):
"""Test primary storage pool scope change
"""
# Disable storage pool
cmd = updateStoragePool.updateStoragePoolCmd()
cmd.id = self.storage.id
cmd.enabled = False
self.apiclient.updateStoragePool(cmd)
self.debug("Disabled storage pool : %s" % self.storage.id)
# Change storage pool scope to Cluster2
cmd = changeStoragePoolScope.changeStoragePoolScopeCmd()
cmd.id = self.storage.id
cmd.scope = "CLUSTER"
cmd.clusterid = self.cluster2.id
self.apiclient.changeStoragePoolScope(cmd)
self.debug("Changed scope of storage pool %s to cluster" % self.storage.id)
pool_id = self.dbclient.execute("select id from storage_pool where uuid=\"" + self.storage.id + "\"")[0][0]
host1 = Host.list(self.apiclient, clusterid=self.cluster1.id, listall=True)[0]
host1_id = self.dbclient.execute("select id from host where uuid=\"" + host1.id + "\"")[0][0]
pool_row = self.dbclient.execute("select cluster_id, pod_id, scope from storage_pool where id=" + str(pool_id))[0]
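# For storage capacity rows (capacity_type=3, STORAGE_ALLOCATED) the
# op_host_capacity.host_id column holds the storage pool id, not a host id.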
capacity_row = self.dbclient.execute("select cluster_id, pod_id from op_host_capacity where capacity_type=3 and host_id=" + str(pool_id))[0]
pool_host_rows = self.dbclient.execute("select id from storage_pool_host_ref where host_id=" + str(host1_id) + " and pool_id=" + str(pool_id))
self.assertIsNotNone(
pool_row[0],
"Cluster id should not be NULL for cluster scope"
)
self.assertIsNotNone(
pool_row[1],
"Pod id should not be NULL for cluster scope"
)
self.assertEqual(
pool_row[2],
"CLUSTER",
"Storage pool scope not changed to Cluster"
)
self.assertIsNotNone(
capacity_row[0],
"Cluster id should not be NULL in the op_host_capacity table"
)
self.assertIsNotNone(
capacity_row[1],
"Pod id set should not be NULL in the op_host_capacity table"
)
self.assertEqual(
len(pool_host_rows),
0,
"Storage pool not removed from the storage_pool_host_ref table for host on another cluster"
)
# Change storage pool scope to Zone
cmd = changeStoragePoolScope.changeStoragePoolScopeCmd()
cmd.id = self.storage.id
cmd.scope = "ZONE"
self.apiclient.changeStoragePoolScope(cmd)
self.debug("Changed scope of storage pool %s to zone" % self.storage.id)
pool_row = self.dbclient.execute("select cluster_id, pod_id, scope from storage_pool where id=" + str(pool_id))[0]
capacity_row = self.dbclient.execute("select cluster_id, pod_id from op_host_capacity where capacity_type=3 and host_id=" + str(pool_id))[0]
pool_host_rows = self.dbclient.execute("select id from storage_pool_host_ref where host_id=" + str(host1_id) + " and pool_id=" + str(pool_id))
self.assertIsNone(
pool_row[0],
"Cluster id not set to NULL for zone scope"
)
self.assertIsNone(
pool_row[1],
"Pod id not set to NULL for zone scope"
)
self.assertEqual(
pool_row[2],
"ZONE",
"Storage pool scope not changed to ZONE"
)
self.assertIsNone(
capacity_row[0],
"Cluster id not set to NULL in the op_host_capacity table"
)
self.assertIsNone(
capacity_row[1],
"Pod id not set to NULL in the op_host_capacity table"
)
self.assertEqual(
len(pool_host_rows),
1,
"Storage pool not added to the storage_pool_host_ref table for host on another cluster"
)
# Enable storage pool
cmd = updateStoragePool.updateStoragePoolCmd()
cmd.id = self.storage.id
cmd.enabled = True
response = self.apiclient.updateStoragePool(cmd)
self.assertEqual(
response.state,
"Up",
"Storage pool couldn't be enabled"
)

View File

@ -142,6 +142,7 @@ known_categories = {
'StorageMaintenance': 'Storage Pool',
'StoragePool': 'Storage Pool',
'StorageProvider': 'Storage Pool',
'StorageScope' : 'Storage Pool',
'updateStorageCapabilities' : 'Storage Pool',
'SecurityGroup': 'Security Group',
'SSH': 'SSH',

View File

@ -60,6 +60,7 @@
"label.action.bulk.release.public.ip.address": "Bulk release public IP addresses",
"label.action.cancel.maintenance.mode": "Cancel maintenance mode",
"label.action.change.password": "Change password",
"label.action.change.primary.storage.scope": "Change primary storage scope",
"label.action.configure.stickiness": "Stickiness",
"label.action.copy.iso": "Copy ISO",
"label.action.copy.snapshot": "Copy Snapshot",
@ -2483,6 +2484,8 @@
"message.action.manage.cluster": "Please confirm that you want to manage the cluster.",
"message.action.patch.router": "Please confirm that you want to live patch the router. <br> This operation is equivalent updating the router packages and restarting the Network without cleanup.",
"message.action.patch.systemvm": "Please confirm that you want to patch the System VM.",
"message.action.primary.storage.scope.cluster": "Please confirm that you want to change the scope from zone to the specified cluster.<br>This operation will update the database and disconnect the storage pool from all hosts that were previously connected to the primary storage and are not part of the specified cluster.",
"message.action.primary.storage.scope.zone": "Please confirm that you want to change the scope from cluster to zone.<br>This operation will update the database and connect the storage pool to all hosts of the zone running the same hypervisor as set on the storage pool.",
"message.action.primarystorage.enable.maintenance.mode": "Warning: placing the primary storage into maintenance mode will cause all Instances using volumes from it to be stopped. Do you want to continue?",
"message.action.reboot.instance": "Please confirm that you want to reboot this Instance.",
"message.action.reboot.router": "All services provided by this virtual router will be interrupted. Please confirm that you want to reboot this router.",
@ -2600,6 +2603,8 @@
"message.change.offering.for.volume.failed": "Change offering for the volume failed",
"message.change.offering.for.volume.processing": "Changing offering for the volume...",
"message.change.password": "Please change your password.",
"message.change.scope.failed": "Scope change failed",
"message.change.scope.processing": "Scope change in progress",
"message.cluster.dedicated": "Cluster Dedicated",
"message.cluster.dedication.released": "Cluster dedication released.",
"message.config.health.monitor.failed": "Configure Health Monitor failed",
@ -3188,6 +3193,7 @@
"message.success.change.affinity.group": "Successfully changed affinity groups",
"message.success.change.offering": "Successfully changed offering",
"message.success.change.password": "Successfully changed password for User",
"message.success.change.scope": "Successfully changed scope for storage pool",
"message.success.config.backup.schedule": "Successfully configured Instance backup schedule",
"message.success.config.health.monitor": "Successfully Configure Health Monitor",
"message.success.config.sticky.policy": "Successfully configured sticky policy",
@ -3374,6 +3380,7 @@
"message.volumes.unmanaged": "Volumes not controlled by CloudStack.",
"message.vr.alert.upon.network.offering.creation.l2": "As virtual routers are not created for L2 Networks, the compute offering will not be used.",
"message.vr.alert.upon.network.offering.creation.others": "As none of the obligatory services for creating a virtual router (VPN, DHCP, DNS, Firewall, LB, UserData, SourceNat, StaticNat, PortForwarding) are enabled, the virtual router will not be created and the compute offering will not be used.",
"message.warn.change.primary.storage.scope": "This feature is tested and supported for the following configurations:<br>KVM - NFS/Ceph - DefaultPrimary<br>VMware - NFS - DefaultPrimary<br>*There might be extra steps involved to make it work for other configurations.",
"message.warn.filetype": "jpg, jpeg, png, bmp and svg are the only supported image formats.",
"message.warn.importing.instance.without.nic": "WARNING: This Instance is being imported without NICs and many Network resources will not be available. Consider creating a NIC via vCenter before importing or as soon as the Instance is imported.",
"message.warn.zone.mtu.update": "Please note that this limit won't affect pre-existing Networks MTU settings",

View File

@ -135,6 +135,26 @@ export default {
dataView: true,
show: (record) => { return ['Maintenance', 'PrepareForMaintenance', 'ErrorInMaintenance'].includes(record.state) }
},
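// Offered only for disabled CLUSTER/ZONE pools whose hypervisor supports
// zone-wide storage (mirrors zoneWidePoolSupportedHypervisorTypes server-side).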
{
api: 'changeStoragePoolScope',
icon: 'swap-outlined',
label: 'label.action.change.primary.storage.scope',
dataView: true,
popup: true,
show: (record) => {
return (record.state === 'Disabled' &&
(record.scope === 'CLUSTER' ||
record.scope === 'ZONE') &&
(record.hypervisor === 'KVM' ||
record.hypervisor === 'VMware' ||
record.hypervisor === 'HyperV' ||
record.hypervisor === 'LXC' ||
record.hypervisor === 'Any' ||
record.hypervisor === 'Simulator')
)
},
component: shallowRef(defineAsyncComponent(() => import('@/views/infra/ChangeStoragePoolScope.vue')))
},
{
api: 'deleteStoragePool',
icon: 'delete-outlined',

View File

@ -21,6 +21,7 @@ import {
ApiOutlined,
AppstoreOutlined,
ArrowDownOutlined,
ArrowRightOutlined,
ArrowUpOutlined,
ArrowsAltOutlined,
AuditOutlined,
@ -182,6 +183,7 @@ export default {
app.component('ApiOutlined', ApiOutlined)
app.component('AppstoreOutlined', AppstoreOutlined)
app.component('ArrowDownOutlined', ArrowDownOutlined)
app.component('ArrowRightOutlined', ArrowRightOutlined)
app.component('ArrowUpOutlined', ArrowUpOutlined)
app.component('ArrowsAltOutlined', ArrowsAltOutlined)
app.component('AuditOutlined', AuditOutlined)

View File

@ -0,0 +1,223 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
<template>
<a-spin :spinning="loading">
<div class="form-layout" v-ctrl-enter="handleSubmitForm">
<div class="form">
<a-form
:ref="formRef"
:model="form"
:rules="rules"
layout="vertical"
@submit="handleSubmitForm">
<a-alert type="warning">
<template #message>
<span
v-html="(resource.scope=='ZONE' ? $t('message.action.primary.storage.scope.cluster') : $t('message.action.primary.storage.scope.zone')) +
'<br><br>' + $t('message.warn.change.primary.storage.scope')"></span>
</template>
</a-alert>
<p></p>
<a-form-item name="clusterid" ref="clusterid" v-if="resource.scope=='ZONE'">
<template #label>
<tooltip-label :title="$t('label.clustername')" :tooltip="placeholder.clusterid"/>
</template>
<a-select
v-model:value="form.clusterid"
:placeholder="placeholder.clusterid"
showSearch
optionFilterProp="label"
:filterOption="(input, option) => {
return option.label.toLowerCase().indexOf(input.toLowerCase()) >= 0
}"
@change="handleChangeCluster">
<a-select-option
v-for="cluster in clustersList"
:value="cluster.id"
:key="cluster.id"
:label="cluster.name">
{{ cluster.name }}
</a-select-option>
</a-select>
</a-form-item>
<div :span="24" class="action-button">
<a-button @click="closeAction">{{ $t('label.cancel') }}</a-button>
<a-button @click="handleSubmitForm" ref="submit" type="primary">{{ $t('label.ok') }}</a-button>
</div>
</a-form>
</div>
</div>
</a-spin>
</template>
<script>
import { ref, reactive, toRaw } from 'vue'
import { api } from '@/api'
import { mixinForm } from '@/utils/mixin'
import TooltipLabel from '@/components/widgets/TooltipLabel'
export default {
name: 'ChangeStoragePoolScope',
mixins: [mixinForm],
components: {
TooltipLabel
},
props: {
resource: {
type: Object,
required: true
}
},
data () {
return {
loading: false,
clustersList: [],
selectedCluster: null,
placeholder: {
clusterid: null
}
}
},
created () {
this.initForm()
this.fetchData()
},
methods: {
initForm () {
this.formRef = ref()
this.form = reactive({ })
this.rules = reactive({
clusterid: [{ required: true, message: this.$t('message.error.select') }]
})
},
fetchData () {
this.fetchClusters(this.resource.zoneid)
},
fetchClusters (zoneId) {
this.form.clusterid = null
this.clustersList = []
if (!zoneId) return
this.zoneId = zoneId
this.loading = true
api('listClusters', { zoneid: zoneId }).then(response => {
this.clustersList = response.listclustersresponse.cluster || []
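// Preselect the first cluster so the form is valid without an extra click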
this.form.clusterid = this.clustersList.length > 0 ? this.clustersList[0].id : null
if (this.form.clusterid) {
this.handleChangeCluster(this.form.clusterid)
}
}).catch(error => {
this.$notifyError(error)
this.clustersList = []
this.form.clusterid = null
}).finally(() => {
this.loading = false
})
},
handleChangeCluster (value) {
this.form.clusterid = value
this.selectedCluster = this.clustersList.find(i => i.id === this.form.clusterid)
},
handleSubmitForm () {
if (this.loading) return
this.formRef.value.validate().then(() => {
const formRaw = toRaw(this.form)
const values = this.handleRemoveFields(formRaw)
this.args = {}
if (this.resource.scope === 'ZONE') {
this.args = {
id: this.resource.id,
scope: 'CLUSTER',
clusterid: values.clusterid
}
} else {
this.args = {
id: this.resource.id,
scope: 'ZONE'
}
}
this.changeStoragePoolScope(this.args)
}).catch(error => {
this.formRef.value.scrollToField(error.errorFields[0].name)
})
},
closeAction () {
this.$emit('close-action')
},
changeStoragePoolScope (args) {
api('changeStoragePoolScope', args).then(json => {
this.$pollJob({
jobId: json.changestoragepoolscoperesponse.jobid,
title: this.$t('message.success.change.scope'),
description: this.resource.name,
successMessage: this.$t('message.success.change.scope'),
successMethod: (result) => {
this.closeAction()
},
errorMessage: this.$t('message.change.scope.failed'),
loadingMessage: this.$t('message.change.scope.processing'),
catchMessage: this.$t('error.fetching.async.job.result')
})
this.closeAction()
}).catch(error => {
this.$notifyError(error)
}).finally(() => {
this.loading = false
})
}
}
}
</script>
<style lang="scss">
.form {
&__label {
margin-bottom: 5px;
.required {
margin-left: 10px;
}
}
&__item {
margin-bottom: 20px;
}
.ant-select {
width: 85vw;
@media (min-width: 760px) {
width: 400px;
}
}
}
.required {
color: #ff0000;
&-label {
display: none;
&--error {
display: block;
}
}
}
</style>