Merge branch 'main' into cks-enhancements-upstream

This commit is contained in:
nvazquez 2025-05-19 21:04:56 -03:00
commit 5e7f86b84f
No known key found for this signature in database
GPG Key ID: 656E1BCC8CB54F84
162 changed files with 6880 additions and 774 deletions

View File

@@ -201,11 +201,12 @@ public interface ConfigurationService {
* TODO
* @param allocationState
* TODO
* @param storageAccessGroups
* @return the new pod if successful, null otherwise
* @throws
* @throws
*/
Pod createPod(long zoneId, String name, String startIp, String endIp, String gateway, String netmask, String allocationState);
Pod createPod(long zoneId, String name, String startIp, String endIp, String gateway, String netmask, String allocationState, List<String> storageAccessGroups);
/**
* Creates a mutually exclusive IP range in the pod with the same gateway, netmask.

View File

@@ -43,4 +43,6 @@ public interface Pod extends InfrastructureEntity, Grouping, Identity, InternalI
AllocationState getAllocationState();
boolean getExternalDhcp();
String getStorageAccessGroups();
}

View File

@@ -467,6 +467,7 @@ public class EventTypes {
public static final String EVENT_ENABLE_PRIMARY_STORAGE = "ENABLE.PS";
public static final String EVENT_DISABLE_PRIMARY_STORAGE = "DISABLE.PS";
public static final String EVENT_SYNC_STORAGE_POOL = "SYNC.STORAGE.POOL";
public static final String EVENT_CONFIGURE_STORAGE_ACCESS = "CONFIGURE.STORAGE.ACCESS";
public static final String EVENT_CHANGE_STORAGE_POOL_SCOPE = "CHANGE.STORAGE.POOL.SCOPE";
// VPN

View File

@@ -213,4 +213,6 @@ public interface Host extends StateObject<Status>, Identity, Partition, HAResour
ResourceState getResourceState();
CPU.CPUArch getArch();
String getStorageAccessGroups();
}

View File

@@ -41,4 +41,6 @@ public interface Cluster extends Grouping, Partition {
ManagedState getManagedState();
CPU.CPUArch getArch();
String getStorageAccessGroups();
}

View File

@@ -95,4 +95,11 @@ public interface ResourceService {
boolean releaseHostReservation(Long hostId);
void updatePodStorageAccessGroups(long podId, List<String> newStorageAccessGroups);
void updateZoneStorageAccessGroups(long zoneId, List<String> newStorageAccessGroups);
void updateClusterStorageAccessGroups(Long clusterId, List<String> newStorageAccessGroups);
void updateHostStorageAccessGroups(Long hostId, List<String> newStorageAccessGroups);
}

View File

@@ -22,6 +22,7 @@ import java.util.Map;
import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd;
import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd;
import org.apache.cloudstack.api.command.admin.storage.ConfigureStorageAccessCmd;
import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd;
@@ -99,6 +100,8 @@ public interface StorageService {
StoragePool disablePrimaryStoragePool(Long id);
boolean configureStorageAccess(ConfigureStorageAccessCmd cmd);
StoragePool getStoragePool(long id);
boolean deleteImageStore(DeleteImageStoreCmd cmd);

View File

@@ -504,6 +504,11 @@ public class ApiConstants {
public static final String SYSTEM_VM_TYPE = "systemvmtype";
public static final String TAGS = "tags";
public static final String STORAGE_TAGS = "storagetags";
public static final String STORAGE_ACCESS_GROUPS = "storageaccessgroups";
public static final String STORAGE_ACCESS_GROUP = "storageaccessgroup";
public static final String CLUSTER_STORAGE_ACCESS_GROUPS = "clusterstorageaccessgroups";
public static final String POD_STORAGE_ACCESS_GROUPS = "podstorageaccessgroups";
public static final String ZONE_STORAGE_ACCESS_GROUPS = "zonestorageaccessgroups";
public static final String SUCCESS = "success";
public static final String SUITABLE_FOR_VM = "suitableforvirtualmachine";
public static final String SUPPORTS_STORAGE_SNAPSHOT = "supportsstoragesnapshot";

View File

@@ -310,6 +310,8 @@ public interface ResponseGenerator {
PodResponse createPodResponse(Pod pod, Boolean showCapacities);
PodResponse createMinimalPodResponse(Pod pod);
ZoneResponse createZoneResponse(ResponseView view, DataCenter dataCenter, Boolean showCapacities, Boolean showResourceIcon);
DataCenterGuestIpv6PrefixResponse createDataCenterGuestIpv6PrefixResponse(DataCenterGuestIpv6Prefix prefix);
@@ -324,6 +326,8 @@ public interface ResponseGenerator {
ClusterResponse createClusterResponse(Cluster cluster, Boolean showCapacities);
ClusterResponse createMinimalClusterResponse(Cluster cluster);
FirewallRuleResponse createPortForwardingRuleResponse(PortForwardingRule fwRule);
IpForwardingRuleResponse createIpForwardingRuleResponse(StaticNatRule fwRule);

View File

@@ -118,6 +118,12 @@ public class AddClusterCmd extends BaseCmd {
private String ovm3cluster;
@Parameter(name = ApiConstants.OVM3_VIP, type = CommandType.STRING, required = false, description = "Ovm3 vip to use for pool (and cluster)")
private String ovm3vip;
@Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS,
type = CommandType.LIST, collectionType = CommandType.STRING,
description = "comma separated list of storage access groups for the hosts in the cluster",
since = "4.21.0")
private List<String> storageAccessGroups;
public String getOvm3Pool() {
return ovm3pool;
}
@@ -192,6 +198,10 @@ public class AddClusterCmd extends BaseCmd {
this.clusterType = type;
}
public List<String> getStorageAccessGroups() {
return storageAccessGroups;
}
@Override
public long getEntityOwnerId() {
return Account.ACCOUNT_ID_SYSTEM;

View File

@@ -74,6 +74,11 @@ public class ListClustersCmd extends BaseListCmd {
since = "4.20.1")
private String arch;
@Parameter(name = ApiConstants.STORAGE_ACCESS_GROUP, type = CommandType.STRING,
description = "the name of the storage access group",
since = "4.21.0")
private String storageAccessGroup;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@@ -122,6 +127,18 @@ public class ListClustersCmd extends BaseListCmd {
return StringUtils.isBlank(arch) ? null : CPU.CPUArch.fromType(arch);
}
public String getStorageAccessGroup() {
return storageAccessGroup;
}
public ListClustersCmd() {
}
public ListClustersCmd(String storageAccessGroup) {
this.storageAccessGroup = storageAccessGroup;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

View File

@@ -130,7 +130,7 @@ public class UpdateClusterCmd extends BaseCmd {
}
Cluster result = _resourceService.updateCluster(this);
if (result != null) {
ClusterResponse clusterResponse = _responseGenerator.createClusterResponse(cluster, false);
ClusterResponse clusterResponse = _responseGenerator.createClusterResponse(result, false);
clusterResponse.setResponseName(getCommandName());
this.setResponseObject(clusterResponse);
} else {

View File

@@ -75,6 +75,12 @@ public class AddHostCmd extends BaseCmd {
@Parameter(name = ApiConstants.HOST_TAGS, type = CommandType.LIST, collectionType = CommandType.STRING, description = "list of tags to be added to the host")
private List<String> hostTags;
@Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS,
type = CommandType.LIST, collectionType = CommandType.STRING,
description = "comma separated list of storage access groups for the host",
since = "4.21.0")
private List<String> storageAccessGroups;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@@ -115,6 +121,10 @@ public class AddHostCmd extends BaseCmd {
return hostTags;
}
public List<String> getStorageAccessGroups() {
return storageAccessGroups;
}
public String getAllocationState() {
return allocationState;
}

View File

@@ -113,6 +113,11 @@ public class ListHostsCmd extends BaseListCmd {
@Parameter(name = ApiConstants.ARCH, type = CommandType.STRING, description = "CPU Arch of the host", since = "4.20.1")
private String arch;
@Parameter(name = ApiConstants.STORAGE_ACCESS_GROUP, type = CommandType.STRING,
description = "the name of the storage access group",
since = "4.21.0")
private String storageAccessGroup;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@@ -205,6 +210,18 @@ public class ListHostsCmd extends BaseListCmd {
return StringUtils.isBlank(arch) ? null : CPU.CPUArch.fromType(arch);
}
public String getStorageAccessGroup() {
return storageAccessGroup;
}
public ListHostsCmd() {
}
public ListHostsCmd(String storageAccessGroup) {
this.storageAccessGroup = storageAccessGroup;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

View File

@@ -30,6 +30,8 @@ import org.apache.cloudstack.api.response.ZoneResponse;
import com.cloud.dc.Pod;
import com.cloud.user.Account;
import java.util.List;
@APICommand(name = "createPod", description = "Creates a new Pod.", responseObject = PodResponse.class,
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class CreatePodCmd extends BaseCmd {
@@ -63,6 +65,12 @@ public class CreatePodCmd extends BaseCmd {
@Parameter(name = ApiConstants.ALLOCATION_STATE, type = CommandType.STRING, description = "Allocation state of this Pod for allocation of new resources")
private String allocationState;
@Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS,
type = CommandType.LIST, collectionType = CommandType.STRING,
description = "comma separated list of storage access groups for the hosts in the pod",
since = "4.21.0")
private List<String> storageAccessGroups;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@@ -95,6 +103,10 @@ public class CreatePodCmd extends BaseCmd {
return allocationState;
}
public List<String> getStorageAccessGroups() {
return storageAccessGroups;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
@@ -111,7 +123,7 @@ public class CreatePodCmd extends BaseCmd {
@Override
public void execute() {
Pod result = _configService.createPod(getZoneId(), getPodName(), getStartIp(), getEndIp(), getGateway(), getNetmask(), getAllocationState());
Pod result = _configService.createPod(getZoneId(), getPodName(), getStartIp(), getEndIp(), getGateway(), getNetmask(), getAllocationState(), getStorageAccessGroups());
if (result != null) {
PodResponse response = _responseGenerator.createPodResponse(result, false);
response.setResponseName(getCommandName());

View File

@@ -55,6 +55,11 @@ public class ListPodsByCmd extends BaseListCmd {
@Parameter(name = ApiConstants.SHOW_CAPACITIES, type = CommandType.BOOLEAN, description = "flag to display the capacity of the pods")
private Boolean showCapacities;
@Parameter(name = ApiConstants.STORAGE_ACCESS_GROUP, type = CommandType.STRING,
description = "the name of the storage access group",
since = "4.21.0")
private String storageAccessGroup;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@@ -79,6 +84,18 @@ public class ListPodsByCmd extends BaseListCmd {
return showCapacities;
}
public String getStorageAccessGroup() {
return storageAccessGroup;
}
public ListPodsByCmd() {
}
public ListPodsByCmd(String storageAccessGroup) {
this.storageAccessGroup = storageAccessGroup;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

View File

@@ -0,0 +1,135 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.admin.storage;
import java.util.List;
import com.cloud.event.EventTypes;
import org.apache.cloudstack.api.ApiCommandResourceType;
import org.apache.cloudstack.api.BaseAsyncCmd;
import org.apache.cloudstack.api.response.ClusterResponse;
import org.apache.cloudstack.api.response.HostResponse;
import org.apache.cloudstack.api.response.PodResponse;
import org.apache.cloudstack.api.response.StoragePoolResponse;
import org.apache.cloudstack.api.response.SuccessResponse;
import org.apache.cloudstack.api.response.ZoneResponse;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ServerApiException;
import com.cloud.user.Account;
@APICommand(name = "configureStorageAccess", description = "Configure the storage access groups on zone/pod/cluster/host and storage, accordingly connections to the storage pools", responseObject = SuccessResponse.class, since = "4.21.0",
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class ConfigureStorageAccessCmd extends BaseAsyncCmd {
/////////////////////////////////////////////////////
//////////////// API parameters /////////////////////
/////////////////////////////////////////////////////
@Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, description = "UUID of the zone")
private Long zoneId;
@Parameter(name = ApiConstants.POD_ID, type = CommandType.UUID, entityType = PodResponse.class, description = "UUID of the pod")
private Long podId;
@Parameter(name = ApiConstants.CLUSTER_ID, type = CommandType.UUID, entityType = ClusterResponse.class, description = "UUID of the cluster")
private Long clusterId;
@Parameter(name = ApiConstants.HOST_ID, type = CommandType.UUID, entityType = HostResponse.class, description = "UUID of the host")
private Long hostId;
@Parameter(name = ApiConstants.STORAGE_ID, type = CommandType.UUID, entityType = StoragePoolResponse.class, description = "UUID of the Storage Pool")
private Long storageId;
@Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS, type = CommandType.LIST, collectionType = CommandType.STRING,
description = "comma separated list of storage access groups for connecting the storage pools and the hosts",
since = "4.21.0")
private List<String> storageAccessGroups;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public Long getZoneId() {
return zoneId;
}
public Long getPodId() {
return podId;
}
public Long getClusterId() {
return clusterId;
}
public Long getHostId() {
return hostId;
}
public Long getStorageId() {
return storageId;
}
public List<String> getStorageAccessGroups() {
return storageAccessGroups;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
@Override
public long getEntityOwnerId() {
return Account.ACCOUNT_ID_SYSTEM;
}
@Override
public ApiCommandResourceType getApiResourceType() {
return ApiCommandResourceType.StoragePool;
}
@Override
public void execute() {
try {
boolean result = _storageService.configureStorageAccess(this);
if (result) {
SuccessResponse response = new SuccessResponse(getCommandName());
setResponseObject(response);
} else {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to configure storage access");
}
} catch (Exception e) {
logger.debug("Failed to configure storage access ", e);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to configure storage access, " + e.getMessage());
}
}
@Override
public String getEventType() {
return EventTypes.EVENT_CONFIGURE_STORAGE_ACCESS;
}
@Override
public String getEventDescription() {
return "configuring storage access groups";
}
}
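For orientation, a minimal sketch of invoking the new configureStorageAccess API over HTTP. The command and parameter names (configureStorageAccess, clusterid, storageaccessgroups) come from this diff; everything else — the unauthenticated integration port 8096, the placeholder UUID, and choosing the cluster scope rather than zone/pod/host/storage — is an assumption for illustration.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ConfigureStorageAccessExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical cluster UUID; storageaccessgroups takes a comma-separated list.
        String url = "http://localhost:8096/client/api"
                + "?command=configureStorageAccess"
                + "&clusterid=<cluster-uuid>"
                + "&storageaccessgroups=groupA,groupB"
                + "&response=json";
        HttpRequest request = HttpRequest.newBuilder(URI.create(url)).GET().build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        // The command extends BaseAsyncCmd, so the body should carry an async job id.
        System.out.println(response.body());
    }
}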

View File

@@ -61,6 +61,10 @@ public class CreateStoragePoolCmd extends BaseCmd {
@Parameter(name = ApiConstants.TAGS, type = CommandType.STRING, description = "the tags for the storage pool")
private String tags;
@Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS, type = CommandType.STRING,
description = "comma separated list of storage access groups for connecting to hosts having those specific groups", since = "4.21.0")
private String storageAccessGroups;
@Parameter(name = ApiConstants.URL, type = CommandType.STRING, required = true, description = "the URL of the storage pool")
private String url;
@@ -115,6 +119,10 @@ public class CreateStoragePoolCmd extends BaseCmd {
return tags;
}
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public String getUrl() {
return url;
}

View File

@@ -0,0 +1,65 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.api.command.admin.storage;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.response.StorageAccessGroupResponse;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiCommandResourceType;
import org.apache.cloudstack.api.BaseListCmd;
import org.apache.cloudstack.api.response.ListResponse;
@APICommand(name = "listStorageAccessGroups", description = "Lists storage access groups", responseObject = StorageAccessGroupResponse.class, since = "4.21.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class ListStorageAccessGroupsCmd extends BaseListCmd {
// ///////////////////////////////////////////////////
// ////////////// API parameters /////////////////////
// ///////////////////////////////////////////////////
@Parameter(name = ApiConstants.NAME, type = BaseCmd.CommandType.STRING, description = "Name of the Storage access group")
private String name;
// ///////////////////////////////////////////////////
// ///////////////// Accessors ///////////////////////
// ///////////////////////////////////////////////////
public String getName() {
return name;
}
// ///////////////////////////////////////////////////
// ///////////// API Implementation///////////////////
// ///////////////////////////////////////////////////
@Override
public ApiCommandResourceType getApiResourceType() {
return ApiCommandResourceType.StoragePool;
}
@Override
public void execute() {
ListResponse<StorageAccessGroupResponse> response = _queryService.searchForStorageAccessGroups(this);
response.setResponseName(getCommandName());
setResponseObject(response);
}
}
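Likewise, a hedged sketch of calling the new list API; same assumptions as the configureStorageAccess example above, with the name filter taken from the ApiConstants.NAME parameter declared in this class.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ListStorageAccessGroupsExample {
    public static void main(String[] args) throws Exception {
        String url = "http://localhost:8096/client/api"
                + "?command=listStorageAccessGroups&name=groupA&response=json";
        HttpResponse<String> response = HttpClient.newHttpClient().send(
                HttpRequest.newBuilder(URI.create(url)).GET().build(),
                HttpResponse.BodyHandlers.ofString());
        // Entries should mirror StorageAccessGroupResponse further down in this diff:
        // id, name, and member lists serialized as hosts, clusters, pods, zones, storagepools.
        System.out.println(response.body());
    }
}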

View File

@@ -41,7 +41,7 @@ public class ListStoragePoolsCmd extends BaseListCmd {
@Parameter(name = ApiConstants.CLUSTER_ID,
type = CommandType.UUID,
entityType = ClusterResponse.class,
description = "list storage pools belongig to the specific cluster")
description = "list storage pools belonging to the specific cluster")
private Long clusterId;
@Parameter(name = ApiConstants.IP_ADDRESS, type = CommandType.STRING, description = "the IP address for the storage pool")
@@ -74,6 +74,10 @@ public class ListStoragePoolsCmd extends BaseListCmd {
@Parameter(name = ApiConstants.STORAGE_CUSTOM_STATS, type = CommandType.BOOLEAN, description = "If true, lists the custom stats of the storage pool", since = "4.18.1")
private Boolean customStats;
@Parameter(name = ApiConstants.STORAGE_ACCESS_GROUP, type = CommandType.STRING, description = "the name of the storage access group", since = "4.21.0")
private String storageAccessGroup;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@@ -134,6 +138,17 @@ public class ListStoragePoolsCmd extends BaseListCmd {
return customStats != null && customStats;
}
public String getStorageAccessGroup() {
return storageAccessGroup;
}
public ListStoragePoolsCmd() {
}
public ListStoragePoolsCmd(String storageAccessGroup) {
this.storageAccessGroup = storageAccessGroup;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

View File

@@ -31,6 +31,8 @@ import org.apache.cloudstack.context.CallContext;
import com.cloud.dc.DataCenter;
import com.cloud.user.Account;
import java.util.List;
@APICommand(name = "createZone", description = "Creates a Zone.", responseObject = ZoneResponse.class,
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class CreateZoneCmd extends BaseCmd {
@@ -88,6 +90,11 @@ public class CreateZoneCmd extends BaseCmd {
@Parameter(name = ApiConstants.IS_EDGE, type = CommandType.BOOLEAN, description = "true if the zone is an edge zone, false otherwise", since = "4.18.0")
private Boolean isEdge;
@Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS,
type = CommandType.LIST, collectionType = CommandType.STRING,
description = "comma separated list of storage access groups for the hosts in the zone",
since = "4.21.0")
private List<String> storageAccessGroups;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
@@ -162,6 +169,10 @@ public class CreateZoneCmd extends BaseCmd {
return isEdge;
}
public List<String> getStorageAccessGroups() {
return storageAccessGroups;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
@Override

View File

@@ -69,6 +69,11 @@ public class ListZonesCmd extends BaseListCmd implements UserCmd {
@Parameter(name = ApiConstants.SHOW_RESOURCE_ICON, type = CommandType.BOOLEAN, description = "flag to display the resource image for the zones")
private Boolean showIcon;
@Parameter(name = ApiConstants.STORAGE_ACCESS_GROUP, type = CommandType.STRING,
description = "the name of the storage access group",
since = "4.21.0")
private String storageAccessGroup;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@@ -109,6 +114,18 @@ public class ListZonesCmd extends BaseListCmd implements UserCmd {
return showIcon != null ? showIcon : false;
}
public String getStorageAccessGroup() {
return storageAccessGroup;
}
public ListZonesCmd() {
}
public ListZonesCmd(String storageAccessGroup) {
this.storageAccessGroup = storageAccessGroup;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

View File

@@ -95,6 +95,18 @@ public class ClusterResponse extends BaseResponseWithAnnotations {
@Param(description = "CPU Arch of the hosts in the cluster", since = "4.20")
private String arch;
@SerializedName(ApiConstants.STORAGE_ACCESS_GROUPS)
@Param(description = "comma-separated list of storage access groups for the host", since = "4.21.0")
private String storageAccessGroups;
@SerializedName(ApiConstants.POD_STORAGE_ACCESS_GROUPS)
@Param(description = "comma-separated list of storage access groups on the pod", since = "4.21.0")
private String podStorageAccessGroups;
@SerializedName(ApiConstants.ZONE_STORAGE_ACCESS_GROUPS)
@Param(description = "comma-separated list of storage access groups on the zone", since = "4.21.0")
private String zoneStorageAccessGroups;
public String getId() {
return id;
}
@@ -259,4 +271,28 @@ public class ClusterResponse extends BaseResponseWithAnnotations {
public String getArch() {
return arch;
}
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setStorageAccessGroups(String storageAccessGroups) {
this.storageAccessGroups = storageAccessGroups;
}
public String getPodStorageAccessGroups() {
return podStorageAccessGroups;
}
public void setPodStorageAccessGroups(String podStorageAccessGroups) {
this.podStorageAccessGroups = podStorageAccessGroups;
}
public String getZoneStorageAccessGroups() {
return zoneStorageAccessGroups;
}
public void setZoneStorageAccessGroups(String zoneStorageAccessGroups) {
this.zoneStorageAccessGroups = zoneStorageAccessGroups;
}
}

View File

@@ -302,6 +302,22 @@ public class HostResponse extends BaseResponseWithAnnotations {
@Param(description = "CPU Arch of the host", since = "4.20")
private String arch;
@SerializedName(ApiConstants.STORAGE_ACCESS_GROUPS)
@Param(description = "comma-separated list of storage access groups for the host", since = "4.21.0")
private String storageAccessGroups;
@SerializedName(ApiConstants.CLUSTER_STORAGE_ACCESS_GROUPS)
@Param(description = "comma-separated list of storage access groups on the cluster", since = "4.21.0")
private String clusterStorageAccessGroups;
@SerializedName(ApiConstants.POD_STORAGE_ACCESS_GROUPS)
@Param(description = "comma-separated list of storage access groups on the pod", since = "4.21.0")
private String podStorageAccessGroups;
@SerializedName(ApiConstants.ZONE_STORAGE_ACCESS_GROUPS)
@Param(description = "comma-separated list of storage access groups on the zone", since = "4.21.0")
private String zoneStorageAccessGroups;
@Override
public String getObjectId() {
return this.getId();
@@ -491,6 +507,38 @@ public class HostResponse extends BaseResponseWithAnnotations {
this.hostTags = hostTags;
}
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setStorageAccessGroups(String storageAccessGroups) {
this.storageAccessGroups = storageAccessGroups;
}
public String getClusterStorageAccessGroups() {
return clusterStorageAccessGroups;
}
public void setClusterStorageAccessGroups(String clusterStorageAccessGroups) {
this.clusterStorageAccessGroups = clusterStorageAccessGroups;
}
public String getPodStorageAccessGroups() {
return podStorageAccessGroups;
}
public void setPodStorageAccessGroups(String podStorageAccessGroups) {
this.podStorageAccessGroups = podStorageAccessGroups;
}
public String getZoneStorageAccessGroups() {
return zoneStorageAccessGroups;
}
public void setZoneStorageAccessGroups(String zoneStorageAccessGroups) {
this.zoneStorageAccessGroups = zoneStorageAccessGroups;
}
public String getExplicitHostTags() {
return explicitHostTags;
}

View File

@@ -85,6 +85,14 @@ public class PodResponse extends BaseResponseWithAnnotations {
@Param(description = "the capacity of the Pod", responseObject = CapacityResponse.class)
private List<CapacityResponse> capacities;
@SerializedName(ApiConstants.STORAGE_ACCESS_GROUPS)
@Param(description = "comma-separated list of storage access groups for the pod", since = "4.21.0")
private String storageAccessGroups;
@SerializedName(ApiConstants.ZONE_STORAGE_ACCESS_GROUPS)
@Param(description = "comma-separated list of storage access groups on the zone", since = "4.21.0")
private String zoneStorageAccessGroups;
public String getId() {
return id;
}
@@ -184,4 +192,20 @@ public class PodResponse extends BaseResponseWithAnnotations {
public void setCapacities(List<CapacityResponse> capacities) {
this.capacities = capacities;
}
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setStorageAccessGroups(String storageAccessGroups) {
this.storageAccessGroups = storageAccessGroups;
}
public String getZoneStorageAccessGroups() {
return zoneStorageAccessGroups;
}
public void setZoneStorageAccessGroups(String zoneStorageAccessGroups) {
this.zoneStorageAccessGroups = zoneStorageAccessGroups;
}
}

View File

@@ -80,7 +80,7 @@ public class ServiceOfferingResponse extends BaseResponseWithAnnotations {
@Param(description = "true if the vm needs to be volatile, i.e., on every reboot of vm from API root disk is discarded and creates a new root disk")
private Boolean isVolatile;
@SerializedName("storagetags")
@SerializedName(ApiConstants.STORAGE_TAGS)
@Param(description = "the tags for the service offering")
private String tags;

View File

@@ -0,0 +1,108 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.response;
import com.google.gson.annotations.SerializedName;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseResponse;
import com.cloud.serializer.Param;
public class StorageAccessGroupResponse extends BaseResponse {
@SerializedName(ApiConstants.ID)
@Param(description = "the ID of the storage access group")
private String id;
@SerializedName(ApiConstants.NAME)
@Param(description = "the name of the storage access group")
private String name;
@SerializedName("hosts")
@Param(description = "List of Hosts in the Storage Access Group")
private ListResponse<HostResponse> hostResponseList;
@SerializedName("clusters")
@Param(description = "List of Clusters in the Storage Access Group")
private ListResponse<ClusterResponse> clusterResponseList;
@SerializedName("pods")
@Param(description = "List of Pods in the Storage Access Group")
private ListResponse<PodResponse> podResponseList;
@SerializedName("zones")
@Param(description = "List of Zones in the Storage Access Group")
private ListResponse<ZoneResponse> zoneResponseList;
@SerializedName("storagepools")
@Param(description = "List of Storage Pools in the Storage Access Group")
private ListResponse<StoragePoolResponse> storagePoolResponseList;
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public ListResponse<HostResponse> getHostResponseList() {
return hostResponseList;
}
public void setHostResponseList(ListResponse<HostResponse> hostResponseList) {
this.hostResponseList = hostResponseList;
}
public ListResponse<ClusterResponse> getClusterResponseList() {
return clusterResponseList;
}
public void setClusterResponseList(ListResponse<ClusterResponse> clusterResponseList) {
this.clusterResponseList = clusterResponseList;
}
public ListResponse<PodResponse> getPodResponseList() {
return podResponseList;
}
public void setPodResponseList(ListResponse<PodResponse> podResponseList) {
this.podResponseList = podResponseList;
}
public ListResponse<ZoneResponse> getZoneResponseList() {
return zoneResponseList;
}
public void setZoneResponseList(ListResponse<ZoneResponse> zoneResponseList) {
this.zoneResponseList = zoneResponseList;
}
public ListResponse<StoragePoolResponse> getStoragePoolResponseList() {
return storagePoolResponseList;
}
public void setStoragePoolResponseList(ListResponse<StoragePoolResponse> storagePoolResponseList) {
this.storagePoolResponseList = storagePoolResponseList;
}
}

View File

@@ -109,6 +109,10 @@ public class StoragePoolResponse extends BaseResponseWithAnnotations {
@Param(description = "the tags for the storage pool")
private String tags;
@SerializedName(ApiConstants.STORAGE_ACCESS_GROUPS)
@Param(description = "the storage access groups for the storage pool", since = "4.21.0")
private String storageAccessGroups;
@SerializedName(ApiConstants.NFS_MOUNT_OPTIONS)
@Param(description = "the nfs mount options for the storage pool", since = "4.19.1")
private String nfsMountOpts;
@@ -344,6 +348,14 @@ public class StoragePoolResponse extends BaseResponseWithAnnotations {
this.tags = tags;
}
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setStorageAccessGroups(String storageAccessGroups) {
this.storageAccessGroups = storageAccessGroups;
}
public Boolean getIsTagARule() {
return isTagARule;
}

View File

@@ -79,6 +79,14 @@ public class UnmanagedInstanceResponse extends BaseResponse {
@Param(description = "the operating system of the virtual machine")
private String operatingSystem;
@SerializedName(ApiConstants.BOOT_MODE)
@Param(description = "indicates the boot mode")
private String bootMode;
@SerializedName(ApiConstants.BOOT_TYPE)
@Param(description = "indicates the boot type")
private String bootType;
@SerializedName(ApiConstants.DISK)
@Param(description = "the list of disks associated with the virtual machine", responseObject = UnmanagedInstanceDiskResponse.class)
private Set<UnmanagedInstanceDiskResponse> disks;
@@ -211,4 +219,20 @@ public class UnmanagedInstanceResponse extends BaseResponse {
public void addNic(NicResponse nic) {
this.nics.add(nic);
}
public String getBootMode() {
return bootMode;
}
public void setBootMode(String bootMode) {
this.bootMode = bootMode;
}
public String getBootType() {
return bootType;
}
public void setBootType(String bootType) {
this.bootType = bootType;
}
}

View File

@@ -95,7 +95,7 @@ public class ZoneResponse extends BaseResponseWithAnnotations implements SetReso
@SerializedName("securitygroupsenabled")
@Param(description = "true if security groups support is enabled, false otherwise")
private boolean securityGroupsEnabled;
private Boolean securityGroupsEnabled;
@SerializedName("allocationstate")
@Param(description = "the allocation state of the cluster")
@@ -115,7 +115,7 @@ public class ZoneResponse extends BaseResponseWithAnnotations implements SetReso
@SerializedName(ApiConstants.LOCAL_STORAGE_ENABLED)
@Param(description = "true if local storage offering enabled, false otherwise")
private boolean localStorageEnabled;
private Boolean localStorageEnabled;
@SerializedName(ApiConstants.TAGS)
@Param(description = "the list of resource tags associated with zone.", responseObject = ResourceTagResponse.class, since = "4.3")
@@ -161,11 +161,19 @@ public class ZoneResponse extends BaseResponseWithAnnotations implements SetReso
@Param(description = "true, if routed network/vpc is enabled", since = "4.20.1")
private boolean routedModeEnabled = false;
@SerializedName(ApiConstants.STORAGE_ACCESS_GROUPS)
@Param(description = "comma-separated list of storage access groups for the zone", since = "4.21.0")
private String storageAccessGroups;
public ZoneResponse() {
tags = new LinkedHashSet<ResourceTagResponse>();
}
public ZoneResponse(Set<ResourceTagResponse> tags) {
this.tags = tags;
}
public void setId(String id) {
this.id = id;
}
@@ -402,6 +410,14 @@ public class ZoneResponse extends BaseResponseWithAnnotations implements SetReso
return type;
}
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setStorageAccessGroups(String storageAccessGroups) {
this.storageAccessGroups = storageAccessGroups;
}
public void setNsxEnabled(boolean nsxEnabled) {
this.nsxEnabled = nsxEnabled;
}

View File

@@ -32,6 +32,7 @@ import org.apache.cloudstack.api.command.admin.router.ListRoutersCmd;
import org.apache.cloudstack.api.command.admin.storage.ListImageStoresCmd;
import org.apache.cloudstack.api.command.admin.storage.ListObjectStoragePoolsCmd;
import org.apache.cloudstack.api.command.admin.storage.ListSecondaryStagingStoresCmd;
import org.apache.cloudstack.api.command.admin.storage.ListStorageAccessGroupsCmd;
import org.apache.cloudstack.api.command.admin.storage.ListStoragePoolsCmd;
import org.apache.cloudstack.api.command.admin.storage.ListStorageTagsCmd;
import org.apache.cloudstack.api.command.admin.storage.heuristics.ListSecondaryStorageSelectorsCmd;
@@ -87,6 +88,7 @@ import org.apache.cloudstack.api.response.SecondaryStorageHeuristicsResponse;
import org.apache.cloudstack.api.response.SecurityGroupResponse;
import org.apache.cloudstack.api.response.ServiceOfferingResponse;
import org.apache.cloudstack.api.response.SnapshotResponse;
import org.apache.cloudstack.api.response.StorageAccessGroupResponse;
import org.apache.cloudstack.api.response.StoragePoolResponse;
import org.apache.cloudstack.api.response.StorageTagResponse;
import org.apache.cloudstack.api.response.TemplateResponse;
@@ -197,6 +199,8 @@ public interface QueryService {
ListResponse<StorageTagResponse> searchForStorageTags(ListStorageTagsCmd cmd);
ListResponse<StorageAccessGroupResponse> searchForStorageAccessGroups(ListStorageAccessGroupsCmd cmd);
ListResponse<HostTagResponse> searchForHostTags(ListHostTagsCmd cmd);
ListResponse<ManagementServerResponse> listManagementServers(ListMgmtsCmd cmd);

View File

@@ -61,6 +61,9 @@ public class UnmanagedInstanceTO {
private String vncPassword;
private String bootType;
private String bootMode;
public String getName() {
return name;
}
@@ -196,6 +199,22 @@ public class UnmanagedInstanceTO {
this, "name", "internalCSName", "hostName", "clusterName"));
}
public String getBootType() {
return bootType;
}
public void setBootType(String bootType) {
this.bootType = bootType;
}
public String getBootMode() {
return bootMode;
}
public void setBootMode(String bootMode) {
this.bootMode = bootMode;
}
public static class Disk {
private String diskId;

View File

@@ -284,6 +284,11 @@ public class CheckOnHostCommandTest {
public CPU.CPUArch getArch() {
return CPU.CPUArch.amd64;
}
@Override
public String getStorageAccessGroups() {
return null;
}
};
CheckOnHostCommand cohc = new CheckOnHostCommand(host);

View File

@@ -30,6 +30,7 @@ public class PrimaryDataStoreParameters {
private String providerName;
private Map<String, String> details;
private String tags;
private String storageAccessGroups;
private StoragePoolType type;
private HypervisorType hypervisorType;
private String host;
@@ -165,6 +166,21 @@ public class PrimaryDataStoreParameters {
this.tags = tags;
}
/**
* @return the storageAccessGroups
*/
public String getStorageAccessGroups() {
return storageAccessGroups;
}
/**
* @param storageAccessGroups
* the storageAccessGroups to set
*/
public void setStorageAccessGroups(String storageAccessGroups) {
this.storageAccessGroups = storageAccessGroups;
}
/**
* @return the details
*/

View File

@@ -64,4 +64,5 @@ public interface StoragePoolAllocator extends Adapter {
static int RETURN_UPTO_ALL = -1;
List<StoragePool> reorderPools(List<StoragePool> pools, VirtualMachineProfile vmProfile, DeploymentPlan plan, DiskProfile dskCh);
}

View File

@@ -147,12 +147,12 @@ public interface ConfigurationManager {
* @param startIp
* @param endIp
* @param allocationState
* @param skipGatewayOverlapCheck
* (true if it is ok to not validate that gateway IP address overlap with Start/End IP of the POD)
* @param skipGatewayOverlapCheck (true if it is ok to not validate that gateway IP address overlap with Start/End IP of the POD)
* @param storageAccessGroups
* @return Pod
*/
HostPodVO createPod(long userId, String podName, DataCenter zone, String gateway, String cidr, String startIp, String endIp, String allocationState,
boolean skipGatewayOverlapCheck);
boolean skipGatewayOverlapCheck, List<String> storageAccessGroups);
/**
* Creates a new zone
@@ -170,13 +170,14 @@
* @param isSecurityGroupEnabled
* @param ip6Dns1
* @param ip6Dns2
* @param storageAccessGroups
* @return
* @throws
* @throws
*/
DataCenterVO createZone(long userId, String zoneName, String dns1, String dns2, String internalDns1, String internalDns2, String guestCidr, String domain,
Long domainId, NetworkType zoneType, String allocationState, String networkDomain, boolean isSecurityGroupEnabled, boolean isLocalStorageEnabled, String ip6Dns1,
String ip6Dns2, boolean isEdge);
String ip6Dns2, boolean isEdge, List<String> storageAccessGroups);
/**
* Deletes a VLAN from the database, along with all of its IP addresses. Will not delete VLANs that have allocated

View File

@@ -21,6 +21,8 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
@@ -236,4 +238,12 @@ public interface ResourceManager extends ResourceService, Configurable {
HostVO findOneRandomRunningHostByHypervisor(HypervisorType type, Long dcId);
boolean cancelMaintenance(final long hostId);
void updateStoragePoolConnectionsOnHosts(Long poolId, List<String> storageAccessGroups);
List<HostVO> getEligibleUpHostsInClusterForStorageConnection(PrimaryDataStoreInfo primaryStore);
List<HostVO> getEligibleUpAndEnabledHostsInClusterForStorageConnection(PrimaryDataStoreInfo primaryStore);
List<HostVO> getEligibleUpAndEnabledHostsInZoneForStorageConnection(DataStore dataStore, long zoneId, HypervisorType hypervisorType);
}

View File

@@ -410,4 +410,9 @@ public interface StorageManager extends StorageService {
void validateChildDatastoresToBeAddedInUpState(StoragePoolVO datastoreClusterPool, List<ModifyStoragePoolAnswer> childDatastoreAnswerList);
boolean checkIfHostAndStoragePoolHasCommonStorageAccessGroups(Host host, StoragePool pool);
Pair<Boolean, String> checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(StoragePool destPool, Volume volume);
String[] getStorageAccessGroups(Long zoneId, Long podId, Long clusterId, Long hostId);
}
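A hedged reading of these additions: HostResponse exposes the groups configured at host, cluster, pod and zone level side by side, and checkIfHostAndStoragePoolHasCommonStorageAccessGroups pairs a host against a pool, which suggests a host's effective set is the union of the groups across levels, and a host can connect to a pool when that set intersects the pool's groups. The sketch below only illustrates that assumed semantic; effectiveGroups and hasCommonGroup are hypothetical helpers, not the committed implementation.

import java.util.LinkedHashSet;
import java.util.Set;

public class StorageAccessGroupsSketch {

    // Assumed: groups live in comma-separated storage_access_groups columns
    // (added by this commit) and combine as a union from zone down to host.
    static Set<String> effectiveGroups(String zone, String pod, String cluster, String host) {
        Set<String> groups = new LinkedHashSet<>();
        for (String level : new String[] {zone, pod, cluster, host}) {
            if (level == null || level.isEmpty()) {
                continue;
            }
            for (String group : level.split(",")) {
                groups.add(group.trim());
            }
        }
        return groups;
    }

    // Assumed connection rule: host and pool must share at least one group;
    // a pool with no groups is treated as open to all hosts (also an assumption).
    static boolean hasCommonGroup(Set<String> hostGroups, String poolGroups) {
        if (poolGroups == null || poolGroups.isEmpty()) {
            return true;
        }
        for (String group : poolGroups.split(",")) {
            if (hostGroups.contains(group.trim())) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        Set<String> host = effectiveGroups("gold", null, "gold,nvme", "nvme");
        System.out.println(host);                         // [gold, nvme]
        System.out.println(hasCommonGroup(host, "nvme")); // true
    }
}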

View File

@@ -210,7 +210,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
scanDirectAgentToLoad();
}
private void scanDirectAgentToLoad() {
protected void scanDirectAgentToLoad() {
logger.trace("Begin scanning directly connected hosts");
// for agents that are self-managed, threshold to be considered as disconnected after pingtimeout
@@ -231,11 +231,21 @@
logger.info("{} is detected down, but we have a forward attache running, disconnect this one before launching the host", host);
removeAgent(agentattache, Status.Disconnected);
} else {
continue;
logger.debug("Host {} status is {} but has an AgentAttache which is not forForward, try to load directly", host, host.getStatus());
Status hostStatus = investigate(agentattache);
if (Status.Up == hostStatus) {
/* Got ping response from host, bring it back */
logger.info("After investigation, Agent for host {} is determined to be up and running", host);
agentStatusTransitTo(host, Event.Ping, _nodeId);
} else {
logger.debug("After investigation, AgentAttache is not null but host status is {}, try to load directly {}", hostStatus, host);
loadDirectlyConnectedHost(host, false);
}
}
} else {
logger.debug("AgentAttache is null, loading directly connected {}", host);
loadDirectlyConnectedHost(host, false);
}
logger.debug("Loading directly connected {}", host);
loadDirectlyConnectedHost(host, false);
} catch (final Throwable e) {
logger.warn(" can not load directly connected {} due to ", host, e);
}
@@ -381,20 +391,20 @@
return;
}
if (!result) {
throw new CloudRuntimeException("Failed to propagate agent change request event:" + Event.ShutdownRequested + " to host:" + hostId);
throw new CloudRuntimeException(String.format("Failed to propagate agent change request event: %s to host: %s", Event.ShutdownRequested, hostId));
}
}
public void notifyNodesInCluster(final AgentAttache attache) {
logger.debug("Notifying other nodes of to disconnect");
final Command[] cmds = new Command[] {new ChangeAgentCommand(attache.getId(), Event.AgentDisconnected)};
final Command[] cmds = new Command[]{new ChangeAgentCommand(attache.getId(), Event.AgentDisconnected)};
_clusterMgr.broadcast(attache.getId(), _gson.toJson(cmds));
}
// notifies MS peers to schedule a host scan task immediately, triggered during addHost operation
public void notifyNodesInClusterToScheduleHostScanTask() {
logger.debug("Notifying other MS nodes to run host scan task");
final Command[] cmds = new Command[] {new ScheduleHostScanTaskCommand()};
final Command[] cmds = new Command[]{new ScheduleHostScanTaskCommand()};
_clusterMgr.broadcast(0, _gson.toJson(cmds));
}
@@ -435,7 +445,7 @@
}
try {
logD(bytes, "Routing to peer");
Link.write(ch, new ByteBuffer[] {ByteBuffer.wrap(bytes)}, sslEngine);
Link.write(ch, new ByteBuffer[]{ByteBuffer.wrap(bytes)}, sslEngine);
return true;
} catch (final IOException e) {
try {
@@ -954,7 +964,7 @@
if (!_agentToTransferIds.isEmpty()) {
logger.debug("Found {} agents to transfer", _agentToTransferIds.size());
// for (Long hostId : _agentToTransferIds) {
for (final Iterator<Long> iterator = _agentToTransferIds.iterator(); iterator.hasNext();) {
for (final Iterator<Long> iterator = _agentToTransferIds.iterator(); iterator.hasNext(); ) {
final Long hostId = iterator.next();
final AgentAttache attache = findAttache(hostId);
@@ -1095,7 +1105,7 @@
return;
}
final ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)attache;
final ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache) attache;
if (success) {
@@ -1146,10 +1156,10 @@
}
synchronized (_agents) {
final ClusteredDirectAgentAttache attache = (ClusteredDirectAgentAttache)_agents.get(hostId);
final ClusteredDirectAgentAttache attache = (ClusteredDirectAgentAttache) _agents.get(hostId);
if (attache != null && attache.getQueueSize() == 0 && attache.getNonRecurringListenersSize() == 0) {
handleDisconnectWithoutInvestigation(attache, Event.StartAgentRebalance, true, true);
final ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)createAttache(host);
final ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache) createAttache(host);
if (forwardAttache == null) {
logger.warn("Unable to create a forward attache for the host {} as a part of rebalance process", host);
return false;
@@ -1253,7 +1263,7 @@
}
if (cmds.length == 1 && cmds[0] instanceof ChangeAgentCommand) { // intercepted
final ChangeAgentCommand cmd = (ChangeAgentCommand)cmds[0];
final ChangeAgentCommand cmd = (ChangeAgentCommand) cmds[0];
logger.debug("Intercepting command for agent change: agent {} event: {}", cmd.getAgentId(), cmd.getEvent());
boolean result;
@@ -1270,7 +1280,7 @@
answers[0] = new ChangeAgentAnswer(cmd, result);
return _gson.toJson(answers);
} else if (cmds.length == 1 && cmds[0] instanceof TransferAgentCommand) {
final TransferAgentCommand cmd = (TransferAgentCommand)cmds[0];
final TransferAgentCommand cmd = (TransferAgentCommand) cmds[0];
logger.debug("Intercepting command for agent rebalancing: agent: {}, event: {}, connection transfer: {}", cmd.getAgentId(), cmd.getEvent(), cmd.isConnectionTransfer());
boolean result;
@@ -1289,7 +1299,7 @@
answers[0] = new Answer(cmd, result, null);
return _gson.toJson(answers);
} else if (cmds.length == 1 && cmds[0] instanceof PropagateResourceEventCommand) {
final PropagateResourceEventCommand cmd = (PropagateResourceEventCommand)cmds[0];
final PropagateResourceEventCommand cmd = (PropagateResourceEventCommand) cmds[0];
logger.debug("Intercepting command to propagate event {} for host {} ({})", () -> cmd.getEvent().name(), cmd::getHostId, () -> _hostDao.findById(cmd.getHostId()));
@@ -1306,10 +1316,10 @@
answers[0] = new Answer(cmd, result, null);
return _gson.toJson(answers);
} else if (cmds.length == 1 && cmds[0] instanceof ScheduleHostScanTaskCommand) {
final ScheduleHostScanTaskCommand cmd = (ScheduleHostScanTaskCommand)cmds[0];
final ScheduleHostScanTaskCommand cmd = (ScheduleHostScanTaskCommand) cmds[0];
return handleScheduleHostScanTaskCommand(cmd);
} else if (cmds.length == 1 && cmds[0] instanceof BaseShutdownManagementServerHostCommand) {
final BaseShutdownManagementServerHostCommand cmd = (BaseShutdownManagementServerHostCommand)cmds[0];
final BaseShutdownManagementServerHostCommand cmd = (BaseShutdownManagementServerHostCommand) cmds[0];
return handleShutdownManagementServerHostCommand(cmd);
}
@@ -1362,7 +1372,7 @@
try {
managementServerMaintenanceManager.prepareForShutdown();
return "Successfully prepared for shutdown";
} catch(CloudRuntimeException e) {
} catch (CloudRuntimeException e) {
return e.getMessage();
}
}
@@ -1371,7 +1381,7 @@
try {
managementServerMaintenanceManager.triggerShutdown();
return "Successfully triggered shutdown";
} catch(CloudRuntimeException e) {
} catch (CloudRuntimeException e) {
return e.getMessage();
}
}
@@ -1380,7 +1390,7 @@
try {
managementServerMaintenanceManager.cancelShutdown();
return "Successfully cancelled shutdown";
} catch(CloudRuntimeException e) {
} catch (CloudRuntimeException e) {
return e.getMessage();
}
}

View File

@@ -61,7 +61,11 @@ import org.apache.cloudstack.ca.CAManager;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.framework.ca.Certificate;
@@ -413,6 +417,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
ResourceCleanupService resourceCleanupService;
@Inject
VmWorkJobDao vmWorkJobDao;
@Inject
DataStoreProviderManager dataStoreProviderManager;
private SingleCache<List<Long>> vmIdsInProgressCache;
@@ -1238,6 +1244,13 @@
planChangedByVolume = true;
}
}
DataStoreProvider storeProvider = dataStoreProviderManager.getDataStoreProvider(pool.getStorageProviderName());
if (storeProvider != null) {
DataStoreDriver storeDriver = storeProvider.getDataStoreDriver();
if (storeDriver instanceof PrimaryDataStoreDriver) {
((PrimaryDataStoreDriver)storeDriver).detachVolumeFromAllStorageNodes(vol);
}
}
}
}

View File

@@ -114,6 +114,9 @@ public class EngineClusterVO implements EngineCluster, Identity {
@Column(name = "engine_state", updatable = true, nullable = false, length = 32)
protected State state = null;
@Column(name = "storage_access_groups")
String storageAccessGroups;
public EngineClusterVO() {
clusterType = Cluster.ClusterType.CloudManaged;
allocationState = Grouping.AllocationState.Enabled;
@@ -176,6 +179,11 @@
return managedState;
}
@Override
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setManagedState(ManagedState managedState) {
this.managedState = managedState;
}

View File

@@ -89,6 +89,9 @@ public class EngineHostPodVO implements EnginePod, Identity {
@Temporal(value = TemporalType.TIMESTAMP)
protected Date lastUpdated;
@Column(name = "storage_access_groups")
String storageAccessGroups;
/**
* Note that state is intentionally missing the setter. Any updates to
* the state machine needs to go through the DAO object because someone
@@ -202,6 +205,11 @@
return externalDhcp;
}
@Override
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setExternalDhcp(boolean use) {
externalDhcp = use;
}

View File

@@ -405,6 +405,9 @@ public class EngineHostVO implements EngineHost, Identity {
@Column(name = "engine_state", updatable = true, nullable = false, length = 32)
protected State orchestrationState = null;
@Column(name = "storage_access_groups")
private String storageAccessGroups = null;
public EngineHostVO(String guid) {
this.guid = guid;
this.status = Status.Creating;
@@ -807,4 +810,13 @@
public PartitionType partitionType() {
return PartitionType.Host;
}
@Override
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setStorageAccessGroups(String storageAccessGroups) {
this.storageAccessGroups = storageAccessGroups;
}
}

View File

@@ -0,0 +1,150 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.agent.manager;
import com.cloud.configuration.ManagementServiceConfiguration;
import com.cloud.ha.HighAvailabilityManagerImpl;
import com.cloud.host.HostVO;
import com.cloud.host.Status;
import com.cloud.host.dao.HostDao;
import com.cloud.resource.ResourceManagerImpl;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
import java.util.ArrayList;
import java.util.List;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@RunWith(MockitoJUnitRunner.class)
public class ClusteredAgentManagerImplTest {
private HostDao _hostDao;
@Mock
ManagementServiceConfiguration _mgmtServiceConf;
@Before
public void setUp() throws Exception {
_hostDao = mock(HostDao.class);
}
@Test
public void scanDirectAgentToLoadNoHostsTest() {
ClusteredAgentManagerImpl clusteredAgentManagerImpl = mock(ClusteredAgentManagerImpl.class);
clusteredAgentManagerImpl._hostDao = _hostDao;
clusteredAgentManagerImpl.scanDirectAgentToLoad();
verify(clusteredAgentManagerImpl, never()).findAttache(anyLong());
verify(clusteredAgentManagerImpl, never()).loadDirectlyConnectedHost(any(), anyBoolean());
}
@Test
public void scanDirectAgentToLoadHostWithoutAttacheTest() {
// Arrange
ClusteredAgentManagerImpl clusteredAgentManagerImpl = Mockito.spy(ClusteredAgentManagerImpl.class);
HostVO hostVO = mock(HostVO.class);
clusteredAgentManagerImpl._hostDao = _hostDao;
clusteredAgentManagerImpl.mgmtServiceConf = _mgmtServiceConf;
clusteredAgentManagerImpl._resourceMgr = mock(ResourceManagerImpl.class);
when(_mgmtServiceConf.getTimeout()).thenReturn(16000L);
when(hostVO.getId()).thenReturn(1L);
List<HostVO> hosts = new ArrayList<>();
hosts.add(hostVO);
when(_hostDao.findAndUpdateDirectAgentToLoad(anyLong(), anyLong(), anyLong())).thenReturn(hosts);
doReturn(Boolean.TRUE).when(clusteredAgentManagerImpl).loadDirectlyConnectedHost(hostVO, false);
clusteredAgentManagerImpl.scanDirectAgentToLoad();
verify(clusteredAgentManagerImpl).loadDirectlyConnectedHost(hostVO, false);
}
@Test
public void scanDirectAgentToLoadHostWithForwardAttacheTest() {
ClusteredAgentManagerImpl clusteredAgentManagerImpl = Mockito.spy(ClusteredAgentManagerImpl.class);
HostVO hostVO = mock(HostVO.class);
clusteredAgentManagerImpl._hostDao = _hostDao;
clusteredAgentManagerImpl.mgmtServiceConf = _mgmtServiceConf;
when(_mgmtServiceConf.getTimeout()).thenReturn(16000L);
when(hostVO.getId()).thenReturn(1L);
List<HostVO> hosts = new ArrayList<>();
hosts.add(hostVO);
when(_hostDao.findAndUpdateDirectAgentToLoad(anyLong(), anyLong(), anyLong())).thenReturn(hosts);
AgentAttache agentAttache = mock(AgentAttache.class);
when(agentAttache.forForward()).thenReturn(Boolean.TRUE);
when(clusteredAgentManagerImpl.findAttache(1L)).thenReturn(agentAttache);
clusteredAgentManagerImpl.scanDirectAgentToLoad();
verify(clusteredAgentManagerImpl).removeAgent(agentAttache, Status.Disconnected);
}
@Test
public void scanDirectAgentToLoadHostWithNonForwardAttacheTest() {
// Arrange
ClusteredAgentManagerImpl clusteredAgentManagerImpl = Mockito.spy(new ClusteredAgentManagerImpl());
HostVO hostVO = mock(HostVO.class);
clusteredAgentManagerImpl._hostDao = _hostDao;
clusteredAgentManagerImpl.mgmtServiceConf = _mgmtServiceConf;
clusteredAgentManagerImpl._haMgr = mock(HighAvailabilityManagerImpl.class);
when(_mgmtServiceConf.getTimeout()).thenReturn(16000L);
when(hostVO.getId()).thenReturn(0L);
List<HostVO> hosts = new ArrayList<>();
hosts.add(hostVO);
when(_hostDao.findAndUpdateDirectAgentToLoad(anyLong(), anyLong(), anyLong())).thenReturn(hosts);
AgentAttache agentAttache = mock(AgentAttache.class);
when(agentAttache.forForward()).thenReturn(Boolean.FALSE);
when(clusteredAgentManagerImpl.findAttache(0L)).thenReturn(agentAttache);
doReturn(Boolean.TRUE).when(clusteredAgentManagerImpl).agentStatusTransitTo(hostVO, Status.Event.Ping, clusteredAgentManagerImpl._nodeId);
doReturn(Status.Up).when(clusteredAgentManagerImpl).investigate(agentAttache);
clusteredAgentManagerImpl.scanDirectAgentToLoad();
verify(clusteredAgentManagerImpl).investigate(agentAttache);
verify(clusteredAgentManagerImpl).agentStatusTransitTo(hostVO, Status.Event.Ping, clusteredAgentManagerImpl._nodeId);
}
@Test
public void scanDirectAgentToLoadHostWithNonForwardAttacheAndDisconnectedTest() {
ClusteredAgentManagerImpl clusteredAgentManagerImpl = Mockito.spy(ClusteredAgentManagerImpl.class);
HostVO hostVO = mock(HostVO.class);
clusteredAgentManagerImpl._hostDao = _hostDao;
clusteredAgentManagerImpl.mgmtServiceConf = _mgmtServiceConf;
clusteredAgentManagerImpl._haMgr = mock(HighAvailabilityManagerImpl.class);
clusteredAgentManagerImpl._resourceMgr = mock(ResourceManagerImpl.class);
when(_mgmtServiceConf.getTimeout()).thenReturn(16000L);
when(hostVO.getId()).thenReturn(0L);
List<HostVO> hosts = new ArrayList<>();
hosts.add(hostVO);
when(_hostDao.findAndUpdateDirectAgentToLoad(anyLong(), anyLong(), anyLong())).thenReturn(hosts);
AgentAttache agentAttache = mock(AgentAttache.class);
when(agentAttache.forForward()).thenReturn(Boolean.FALSE);
when(clusteredAgentManagerImpl.findAttache(0L)).thenReturn(agentAttache);
doReturn(Boolean.TRUE).when(clusteredAgentManagerImpl).loadDirectlyConnectedHost(hostVO, false);
clusteredAgentManagerImpl.scanDirectAgentToLoad();
verify(clusteredAgentManagerImpl).investigate(agentAttache);
verify(clusteredAgentManagerImpl).loadDirectlyConnectedHost(hostVO, false);
}
}

View File

@ -85,6 +85,10 @@ public class ClusterVO implements Cluster {
@Column(name = "uuid")
String uuid;
@Column(name = "storage_access_groups")
String storageAccessGroups;
public ClusterVO() {
clusterType = Cluster.ClusterType.CloudManaged;
allocationState = Grouping.AllocationState.Enabled;
@ -215,6 +219,14 @@ public class ClusterVO implements Cluster {
this.arch = arch;
}
@Override
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setStorageAccessGroups(String storageAccessGroups) {
this.storageAccessGroups = storageAccessGroups;
}
@Override
public String toString() {
return String.format("Cluster {id: \"%s\", name: \"%s\", uuid: \"%s\"}", id, name, uuid);

View File

@ -142,6 +142,9 @@ public class DataCenterVO implements DataCenter {
@Enumerated(value = EnumType.STRING)
private DataCenter.Type type;
@Column(name = "storage_access_groups")
String storageAccessGroups;
@Override
public String getDnsProvider() {
return dnsProvider;
@ -485,6 +488,14 @@ public class DataCenterVO implements DataCenter {
this.type = type;
}
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setStorageAccessGroups(String storageAccessGroups) {
this.storageAccessGroups = storageAccessGroups;
}
@Override
public String toString() {
return String.format("Zone {\"id\": \"%s\", \"name\": \"%s\", \"uuid\": \"%s\"}", id, name, uuid);

View File

@ -71,6 +71,9 @@ public class HostPodVO implements Pod {
@Column(name = "uuid")
private String uuid;
@Column(name = "storage_access_groups")
String storageAccessGroups;
public HostPodVO(String name, long dcId, String gateway, String cidrAddress, int cidrSize, String description) {
this.name = name;
this.dataCenterId = dcId;
@ -199,6 +202,14 @@ public class HostPodVO implements Pod {
this.uuid = uuid;
}
@Override
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setStorageAccessGroups(String storageAccessGroups) {
this.storageAccessGroups = storageAccessGroups;
}
@Override
public String toString() {
return String.format("HostPod %s",

View File

@ -57,4 +57,6 @@ public interface ClusterDao extends GenericDao<ClusterVO, Long> {
List<CPU.CPUArch> getClustersArchsByZone(long zoneId);
List<ClusterVO> listClustersByArchAndZoneId(long zoneId, CPU.CPUArch arch);
List<String> listDistinctStorageAccessGroups(String name, String keyword);
}

View File

@ -346,4 +346,36 @@ public class ClusterDaoImpl extends GenericDaoBase<ClusterVO, Long> implements C
sc.setParameters("arch", arch);
return listBy(sc);
}
@Override
public List<String> listDistinctStorageAccessGroups(String name, String keyword) {
GenericSearchBuilder<ClusterVO, String> searchBuilder = createSearchBuilder(String.class);
searchBuilder.select(null, SearchCriteria.Func.DISTINCT, searchBuilder.entity().getStorageAccessGroups());
if (name != null) {
searchBuilder.and().op("storageAccessGroupExact", searchBuilder.entity().getStorageAccessGroups(), Op.EQ);
searchBuilder.or("storageAccessGroupPrefix", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
searchBuilder.or("storageAccessGroupSuffix", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
searchBuilder.or("storageAccessGroupMiddle", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
searchBuilder.cp();
}
if (keyword != null) {
searchBuilder.and("keyword", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
}
searchBuilder.done();
SearchCriteria<String> sc = searchBuilder.create();
if (name != null) {
sc.setParameters("storageAccessGroupExact", name);
sc.setParameters("storageAccessGroupPrefix", name + ",%");
sc.setParameters("storageAccessGroupSuffix", "%," + name);
sc.setParameters("storageAccessGroupMiddle", "%," + name + ",%");
}
if (keyword != null) {
sc.setParameters("keyword", "%" + keyword + "%");
}
return customSearch(sc, null);
}
}
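listDistinctStorageAccessGroups matches a group name anywhere inside the comma-separated column by OR-ing an exact EQ with three LIKE patterns: prefix (name,%), suffix (%,name) and middle (%,name,%). A plain-Java sketch, for illustration only, of the predicate those four parameters express:

// Illustrative only: the in-memory equivalent of the EQ/LIKE combination above.
static boolean columnContainsGroup(String column, String name) {
    return column.equals(name)                    // the only group in the column
            || column.startsWith(name + ",")      // first group
            || column.endsWith("," + name)        // last group
            || column.contains("," + name + ","); // somewhere in the middle
}

The same four-pattern search is repeated in the DataCenter, HostPod and Host DAO implementations below.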

View File

@ -117,4 +117,6 @@ public interface DataCenterDao extends GenericDao<DataCenterVO, Long> {
List<DataCenterVO> listAllZones();
List<DataCenterVO> listByIds(List<Long> ids);
List<String> listDistinctStorageAccessGroups(String name, String keyword);
}

View File

@ -25,6 +25,7 @@ import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.utils.db.GenericSearchBuilder;
import org.apache.commons.collections.CollectionUtils;
import org.springframework.stereotype.Component;
@ -441,4 +442,36 @@ public class DataCenterDaoImpl extends GenericDaoBase<DataCenterVO, Long> implem
sc.setParameters("ids", ids.toArray());
return listBy(sc);
}
@Override
public List<String> listDistinctStorageAccessGroups(String name, String keyword) {
GenericSearchBuilder<DataCenterVO, String> searchBuilder = createSearchBuilder(String.class);
searchBuilder.select(null, SearchCriteria.Func.DISTINCT, searchBuilder.entity().getStorageAccessGroups());
if (name != null) {
searchBuilder.and().op("storageAccessGroupExact", searchBuilder.entity().getStorageAccessGroups(), SearchCriteria.Op.EQ);
searchBuilder.or("storageAccessGroupPrefix", searchBuilder.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE);
searchBuilder.or("storageAccessGroupSuffix", searchBuilder.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE);
searchBuilder.or("storageAccessGroupMiddle", searchBuilder.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE);
searchBuilder.cp();
}
if (keyword != null) {
searchBuilder.and("keyword", searchBuilder.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE);
}
searchBuilder.done();
SearchCriteria<String> sc = searchBuilder.create();
if (name != null) {
sc.setParameters("storageAccessGroupExact", name);
sc.setParameters("storageAccessGroupPrefix", name + ",%");
sc.setParameters("storageAccessGroupSuffix", "%," + name);
sc.setParameters("storageAccessGroupMiddle", "%," + name + ",%");
}
if (keyword != null) {
sc.setParameters("keyword", "%" + keyword + "%");
}
return customSearch(sc, null);
}
}

View File

@ -34,4 +34,6 @@ public interface HostPodDao extends GenericDao<HostPodVO, Long> {
public List<Long> listAllPods(Long zoneId);
public List<HostPodVO> listAllPodsByCidr(long zoneId, String cidr);
List<String> listDistinctStorageAccessGroups(String name, String keyword);
}

View File

@ -143,4 +143,36 @@ public class HostPodDaoImpl extends GenericDaoBase<HostPodVO, Long> implements H
return listBy(sc);
}
@Override
public List<String> listDistinctStorageAccessGroups(String name, String keyword) {
GenericSearchBuilder<HostPodVO, String> searchBuilder = createSearchBuilder(String.class);
searchBuilder.select(null, SearchCriteria.Func.DISTINCT, searchBuilder.entity().getStorageAccessGroups());
if (name != null) {
searchBuilder.and().op("storageAccessGroupExact", searchBuilder.entity().getStorageAccessGroups(), Op.EQ);
searchBuilder.or("storageAccessGroupPrefix", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
searchBuilder.or("storageAccessGroupSuffix", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
searchBuilder.or("storageAccessGroupMiddle", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
searchBuilder.cp();
}
if (keyword != null) {
searchBuilder.and("keyword", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
}
searchBuilder.done();
SearchCriteria<String> sc = searchBuilder.create();
if (name != null) {
sc.setParameters("storageAccessGroupExact", name);
sc.setParameters("storageAccessGroupPrefix", name + ",%");
sc.setParameters("storageAccessGroupSuffix", "%," + name);
sc.setParameters("storageAccessGroupMiddle", "%," + name + ",%");
}
if (keyword != null) {
sc.setParameters("keyword", "%" + keyword + "%");
}
return customSearch(sc, null);
}
}

View File

@ -165,6 +165,9 @@ public class HostVO implements Host {
@Column(name = "uuid")
private String uuid;
@Column(name = "storage_access_groups")
String storageAccessGroups;
// This is a delayed load value. If the value is null,
// then this field has not been loaded yet.
// Call host dao to load it.
@ -357,6 +360,15 @@ public class HostVO implements Host {
return isTagARule;
}
@Override
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setStorageAccessGroups(String storageAccessGroups) {
this.storageAccessGroups = storageAccessGroups;
}
public HashMap<String, HashMap<String, VgpuTypesInfo>> getGpuGroupDetails() {
return groupDetails;
}

View File

@ -84,6 +84,10 @@ public interface HostDao extends GenericDao<HostVO, Long>, StateDao<Status, Stat
List<HostVO> findHypervisorHostInCluster(long clusterId);
List<HostVO> findHypervisorHostInPod(long podId);
List<HostVO> findHypervisorHostInZone(long zoneId);
HostVO findAnyStateHypervisorHostInCluster(long clusterId);
HostVO findOldestExistentHypervisorHostInCluster(long clusterId);
@ -96,10 +100,14 @@ public interface HostDao extends GenericDao<HostVO, Long>, StateDao<Status, Stat
List<HostVO> findByPodId(Long podId);
List<HostVO> findByPodId(Long podId, Type type);
List<Long> listIdsByPodId(Long podId);
List<HostVO> findByClusterId(Long clusterId);
List<HostVO> findByClusterId(Long clusterId, Type type);
List<Long> listIdsByClusterId(Long clusterId);
List<Long> listIdsForUpRouting(Long zoneId, Long podId, Long clusterId);
@ -221,4 +229,6 @@ public interface HostDao extends GenericDao<HostVO, Long>, StateDao<Status, Stat
List<HostVO> listByIds(final List<Long> ids);
Long findClusterIdByVolumeInfo(VolumeInfo volumeInfo);
List<String> listDistinctStorageAccessGroups(String name, String keyword);
}
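With the overloads added above, callers can optionally narrow results to a single host type. A hedged usage sketch (hostDao and clusterId are assumed to exist in the caller):

// Illustrative only.
List<HostVO> routingHosts = hostDao.findByClusterId(clusterId, Host.Type.Routing); // hypervisor hosts only
List<HostVO> allHosts = hostDao.findByClusterId(clusterId);                        // no type filter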

View File

@ -107,7 +107,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
protected SearchBuilder<HostVO> IdStatusSearch;
protected SearchBuilder<HostVO> TypeDcSearch;
protected SearchBuilder<HostVO> TypeDcStatusSearch;
protected SearchBuilder<HostVO> TypeClusterStatusSearch;
protected SearchBuilder<HostVO> TypeStatusStateSearch;
protected SearchBuilder<HostVO> MsStatusSearch;
protected SearchBuilder<HostVO> DcPrivateIpAddressSearch;
protected SearchBuilder<HostVO> DcStorageIpAddressSearch;
@ -266,12 +266,14 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
TypeDcStatusSearch.and("resourceState", TypeDcStatusSearch.entity().getResourceState(), SearchCriteria.Op.EQ);
TypeDcStatusSearch.done();
TypeClusterStatusSearch = createSearchBuilder();
TypeClusterStatusSearch.and("type", TypeClusterStatusSearch.entity().getType(), SearchCriteria.Op.EQ);
TypeClusterStatusSearch.and("cluster", TypeClusterStatusSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
TypeClusterStatusSearch.and("status", TypeClusterStatusSearch.entity().getStatus(), SearchCriteria.Op.EQ);
TypeClusterStatusSearch.and("resourceState", TypeClusterStatusSearch.entity().getResourceState(), SearchCriteria.Op.EQ);
TypeClusterStatusSearch.done();
TypeStatusStateSearch = createSearchBuilder();
TypeStatusStateSearch.and("type", TypeStatusStateSearch.entity().getType(), SearchCriteria.Op.EQ);
TypeStatusStateSearch.and("cluster", TypeStatusStateSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
TypeStatusStateSearch.and("pod", TypeStatusStateSearch.entity().getPodId(), SearchCriteria.Op.EQ);
TypeStatusStateSearch.and("zone", TypeStatusStateSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
TypeStatusStateSearch.and("status", TypeStatusStateSearch.entity().getStatus(), SearchCriteria.Op.EQ);
TypeStatusStateSearch.and("resourceState", TypeStatusStateSearch.entity().getResourceState(), SearchCriteria.Op.EQ);
TypeStatusStateSearch.done();
IdsSearch = createSearchBuilder();
IdsSearch.and("id", IdsSearch.entity().getId(), SearchCriteria.Op.IN);
@ -328,10 +330,12 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
PodSearch = createSearchBuilder();
PodSearch.and("podId", PodSearch.entity().getPodId(), SearchCriteria.Op.EQ);
PodSearch.and("type", PodSearch.entity().getType(), Op.EQ);
PodSearch.done();
ClusterSearch = createSearchBuilder();
ClusterSearch.and("clusterId", ClusterSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
ClusterSearch.and("type", ClusterSearch.entity().getType(), Op.EQ);
ClusterSearch.done();
TypeSearch = createSearchBuilder();
@ -1238,8 +1242,16 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
@Override
public List<HostVO> findByPodId(Long podId) {
return findByPodId(podId, null);
}
@Override
public List<HostVO> findByPodId(Long podId, Type type) {
SearchCriteria<HostVO> sc = PodSearch.create();
sc.setParameters("podId", podId);
if (type != null) {
sc.setParameters("type", Type.Routing);
}
return listBy(sc);
}
@ -1250,8 +1262,16 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
@Override
public List<HostVO> findByClusterId(Long clusterId) {
return findByClusterId(clusterId, null);
}
@Override
public List<HostVO> findByClusterId(Long clusterId, Type type) {
SearchCriteria<HostVO> sc = ClusterSearch.create();
sc.setParameters("clusterId", clusterId);
if (type != null) {
sc.setParameters("type", Type.Routing);
}
return listBy(sc);
}
@ -1355,7 +1375,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
@Override
public List<HostVO> findHypervisorHostInCluster(long clusterId) {
SearchCriteria<HostVO> sc = TypeClusterStatusSearch.create();
SearchCriteria<HostVO> sc = TypeStatusStateSearch.create();
sc.setParameters("type", Host.Type.Routing);
sc.setParameters("cluster", clusterId);
sc.setParameters("status", Status.Up);
@ -1364,9 +1384,31 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
return listBy(sc);
}
@Override
public List<HostVO> findHypervisorHostInZone(long zoneId) {
SearchCriteria<HostVO> sc = TypeStatusStateSearch.create();
sc.setParameters("type", Host.Type.Routing);
sc.setParameters("zone", zoneId);
sc.setParameters("status", Status.Up);
sc.setParameters("resourceState", ResourceState.Enabled);
return listBy(sc);
}
@Override
public List<HostVO> findHypervisorHostInPod(long podId) {
SearchCriteria<HostVO> sc = TypeStatusStateSearch.create();
sc.setParameters("type", Host.Type.Routing);
sc.setParameters("pod", podId);
sc.setParameters("status", Status.Up);
sc.setParameters("resourceState", ResourceState.Enabled);
return listBy(sc);
}
@Override
public HostVO findAnyStateHypervisorHostInCluster(long clusterId) {
SearchCriteria<HostVO> sc = TypeClusterStatusSearch.create();
SearchCriteria<HostVO> sc = TypeStatusStateSearch.create();
sc.setParameters("type", Host.Type.Routing);
sc.setParameters("cluster", clusterId);
List<HostVO> list = listBy(sc, new Filter(1));
@ -1375,7 +1417,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
@Override
public HostVO findOldestExistentHypervisorHostInCluster(long clusterId) {
SearchCriteria<HostVO> sc = TypeClusterStatusSearch.create();
SearchCriteria<HostVO> sc = TypeStatusStateSearch.create();
sc.setParameters("type", Host.Type.Routing);
sc.setParameters("cluster", clusterId);
sc.setParameters("status", Status.Up);
@ -1876,4 +1918,36 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
return host.getClusterId();
}
@Override
public List<String> listDistinctStorageAccessGroups(String name, String keyword) {
GenericSearchBuilder<HostVO, String> searchBuilder = createSearchBuilder(String.class);
searchBuilder.select(null, SearchCriteria.Func.DISTINCT, searchBuilder.entity().getStorageAccessGroups());
if (name != null) {
searchBuilder.and().op("storageAccessGroupExact", searchBuilder.entity().getStorageAccessGroups(), Op.EQ);
searchBuilder.or("storageAccessGroupPrefix", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
searchBuilder.or("storageAccessGroupSuffix", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
searchBuilder.or("storageAccessGroupMiddle", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
searchBuilder.cp();
}
if (keyword != null) {
searchBuilder.and("keyword", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
}
searchBuilder.done();
SearchCriteria<String> sc = searchBuilder.create();
if (name != null) {
sc.setParameters("storageAccessGroupExact", name);
sc.setParameters("storageAccessGroupPrefix", name + ",%");
sc.setParameters("storageAccessGroupSuffix", "%," + name);
sc.setParameters("storageAccessGroupMiddle", "%," + name + ",%");
}
if (keyword != null) {
sc.setParameters("keyword", "%" + keyword + "%");
}
return customSearch(sc, null);
}
}

View File

@ -0,0 +1,64 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.storage;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;
import org.apache.cloudstack.api.InternalIdentity;
@Entity
@Table(name = "storage_pool_and_access_group_map")
public class StoragePoolAndAccessGroupMapVO implements InternalIdentity {
protected StoragePoolAndAccessGroupMapVO() {
}
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
@Column(name = "id")
private long id;
@Column(name = "pool_id")
private long poolId;
@Column(name = "storage_access_group")
private String storageAccessGroup;
public StoragePoolAndAccessGroupMapVO(long poolId, String storageAccessGroup) {
this.poolId = poolId;
this.storageAccessGroup = storageAccessGroup;
}
@Override
public long getId() {
return this.id;
}
public long getPoolId() {
return poolId;
}
public String getStorageAccessGroup() {
return storageAccessGroup;
}
}

View File

@ -0,0 +1,31 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.storage.dao;
import java.util.List;
import com.cloud.storage.StoragePoolAndAccessGroupMapVO;
import com.cloud.utils.db.GenericDao;
public interface StoragePoolAndAccessGroupMapDao extends GenericDao<StoragePoolAndAccessGroupMapVO, Long> {
void persist(long poolId, List<String> storageAccessGroups);
List<String> getStorageAccessGroups(long poolId);
void deleteStorageAccessGroups(long poolId);
List<String> listDistinctStorageAccessGroups(String name, String keyword);
}

View File

@ -0,0 +1,105 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.storage.dao;
import java.util.ArrayList;
import java.util.List;
import com.cloud.storage.StoragePoolAndAccessGroupMapVO;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.TransactionLegacy;
public class StoragePoolAndAccessGroupMapDaoImpl extends GenericDaoBase<StoragePoolAndAccessGroupMapVO, Long> implements StoragePoolAndAccessGroupMapDao {
protected final SearchBuilder<StoragePoolAndAccessGroupMapVO> StoragePoolAccessGroupSearch;
public StoragePoolAndAccessGroupMapDaoImpl() {
StoragePoolAccessGroupSearch = createSearchBuilder();
StoragePoolAccessGroupSearch.and("poolId", StoragePoolAccessGroupSearch.entity().getPoolId(), SearchCriteria.Op.EQ);
StoragePoolAccessGroupSearch.done();
}
@Override
public void persist(long poolId, List<String> storageAccessGroups) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
txn.start();
SearchCriteria<StoragePoolAndAccessGroupMapVO> sc = StoragePoolAccessGroupSearch.create();
sc.setParameters("poolId", poolId);
expunge(sc);
for (String sag : storageAccessGroups) {
sag = sag.trim();
if (sag.length() > 0) {
StoragePoolAndAccessGroupMapVO vo = new StoragePoolAndAccessGroupMapVO(poolId, sag);
persist(vo);
}
}
txn.commit();
}
@Override
public List<String> getStorageAccessGroups(long poolId) {
SearchCriteria<StoragePoolAndAccessGroupMapVO> sc = StoragePoolAccessGroupSearch.create();
sc.setParameters("poolId", poolId);
List<StoragePoolAndAccessGroupMapVO> results = search(sc, null);
List<String> storagePoolAccessGroups = new ArrayList<String>(results.size());
for (StoragePoolAndAccessGroupMapVO result : results) {
storagePoolAccessGroups.add(result.getStorageAccessGroup());
}
return storagePoolAccessGroups;
}
@Override
public void deleteStorageAccessGroups(long poolId) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
txn.start();
SearchCriteria<StoragePoolAndAccessGroupMapVO> sc = StoragePoolAccessGroupSearch.create();
sc.setParameters("poolId", poolId);
expunge(sc);
txn.commit();
}
@Override
public List<String> listDistinctStorageAccessGroups(String name, String keyword) {
GenericSearchBuilder<StoragePoolAndAccessGroupMapVO, String> searchBuilder = createSearchBuilder(String.class);
searchBuilder.select(null, SearchCriteria.Func.DISTINCT, searchBuilder.entity().getStorageAccessGroup());
searchBuilder.and("name", searchBuilder.entity().getStorageAccessGroup(), SearchCriteria.Op.EQ);
searchBuilder.and("keyword", searchBuilder.entity().getStorageAccessGroup(), SearchCriteria.Op.LIKE);
searchBuilder.done();
SearchCriteria<String> sc = searchBuilder.create();
if (name != null) {
sc.setParameters("name", name);
}
if (keyword != null) {
sc.setParameters("keyword", "%" + keyword + "%");
}
return customSearch(sc, null);
}
}
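A hedged usage sketch of the map DAO above (the pool id and group names are made up). Note that persist() first expunges any existing rows for the pool inside the transaction, so it acts as a replace rather than an append, and blank entries are skipped:

// Illustrative only: poolId 42 and the group names are hypothetical.
List<String> groups = Arrays.asList("rack1", "rack2", "  ");
storagePoolAndAccessGroupMapDao.persist(42L, groups);      // replaces any previous mappings for pool 42
List<String> stored = storagePoolAndAccessGroupMapDao.getStorageAccessGroups(42L); // ["rack1", "rack2"]
storagePoolAndAccessGroupMapDao.deleteStorageAccessGroups(42L); // removes all mappings again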

View File

@ -58,9 +58,9 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
*/
void updateCapacityIops(long id, long capacityIops);
StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags, Boolean isTagARule);
StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags, Boolean isTagARule, List<String> storageAccessGroups);
StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags, Boolean isTagARule, boolean displayDetails);
StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags, Boolean isTagARule, boolean displayDetails, List<String> storageAccessGroups);
/**
* Find pool by name.
@ -84,7 +84,9 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
*/
List<StoragePoolVO> findPoolsByDetails(long dcId, long podId, Long clusterId, Map<String, String> details, ScopeType scope);
List<StoragePoolVO> findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags, boolean validateTagRule, long ruleExecuteTimeout);
List<StoragePoolVO> findPoolsByTags(long dcId, long podId, Long clusterId, ScopeType scope, String[] tags, boolean validateTagRule, long ruleExecuteTimeout);
List<StoragePoolVO> findPoolsByAccessGroupsForHostConnection(Long dcId, Long podId, Long clusterId, ScopeType scope, String[] storageAccessGroups);
List<StoragePoolVO> findDisabledPoolsByScope(long dcId, Long podId, Long clusterId, ScopeType scope);
@ -127,6 +129,10 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
List<StoragePoolVO> findZoneWideStoragePoolsByTags(long dcId, String[] tags, boolean validateTagRule);
List<StoragePoolVO> findZoneWideStoragePoolsByAccessGroupsForHostConnection(long dcId, String[] storageAccessGroups);
List<StoragePoolVO> findZoneWideStoragePoolsByAccessGroupsAndHypervisorTypeForHostConnection(long dcId, String[] storageAccessGroups, HypervisorType type);
List<StoragePoolVO> findZoneWideStoragePoolsByHypervisor(long dataCenterId, HypervisorType hypervisorType);
List<StoragePoolVO> findZoneWideStoragePoolsByHypervisor(long dataCenterId, HypervisorType hypervisorType, String keyword);
@ -143,6 +149,8 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
void deletePoolTags(long poolId);
void deleteStoragePoolAccessGroups(long poolId);
List<StoragePoolVO> listChildStoragePoolsInDatastoreCluster(long poolId);
Integer countAll();
@ -154,8 +162,10 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
List<StoragePoolVO> listStoragePoolsWithActiveVolumesByOfferingId(long offeringid);
Pair<List<Long>, Integer> searchForIdsAndCount(Long storagePoolId, String storagePoolName, Long zoneId,
String path, Long podId, Long clusterId, String address, ScopeType scopeType, StoragePoolStatus status,
String keyword, Filter searchFilter);
String path, Long podId, Long clusterId, Long hostId, String address, ScopeType scopeType, StoragePoolStatus status,
String keyword, String storageAccessGroup, Filter searchFilter);
List<StoragePoolVO> listByIds(List<Long> ids);
List<StoragePoolVO> findStoragePoolsByEmptyStorageAccessGroups(Long dcId, Long podId, Long clusterId, ScopeType scope, HypervisorType hypervisorType);
}
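A hedged sketch of the host-connection lookups added above (identifiers are hypothetical). As the implementation below shows, a null or empty group array falls back to listing every pool in the requested scope, while a non-empty array restricts the result to pools mapped to at least one of the given groups:

// Illustrative only; zoneId, podId, clusterId and the groups are made up.
String[] hostGroups = {"rack1", "rack2"};
List<StoragePoolVO> clusterPools = primaryDataStoreDao.findPoolsByAccessGroupsForHostConnection(
        zoneId, podId, clusterId, ScopeType.CLUSTER, hostGroups);
List<StoragePoolVO> zonePools = primaryDataStoreDao.findZoneWideStoragePoolsByAccessGroupsForHostConnection(
        zoneId, hostGroups);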

View File

@ -28,6 +28,8 @@ import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.storage.StoragePoolAndAccessGroupMapVO;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import org.apache.commons.collections.CollectionUtils;
import com.cloud.host.Status;
@ -70,15 +72,25 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
private StoragePoolHostDao _hostDao;
@Inject
private StoragePoolTagsDao _tagsDao;
@Inject
StoragePoolAndAccessGroupMapDao _storagePoolAccessGroupMapDao;
protected final String DetailsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and storage_pool.scope = ? and (";
protected final String DetailsSqlSuffix = ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?";
protected final String DetailsForHostConnectionSqlSuffix = ") GROUP BY storage_pool_details.pool_id";
private final String ZoneWideTagsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_tags ON storage_pool.id = storage_pool_tags.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' AND storage_pool_tags.is_tag_a_rule = 0 and storage_pool.data_center_id = ? and storage_pool.scope = ? and (";
private final String ZoneWideTagsSqlSuffix = ") GROUP BY storage_pool_tags.pool_id HAVING COUNT(storage_pool_tags.tag) >= ?";
private final String ZoneWideStorageAccessGroupsForHostConnectionSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_and_access_group_map ON storage_pool.id = storage_pool_and_access_group_map.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and storage_pool.scope = ? and (";
private final String ZoneWideStorageAccessGroupsForHostConnectionSqlSuffix = ") GROUP BY storage_pool_and_access_group_map.pool_id";
private final String ZoneWideStorageAccessGroupsWithHypervisorTypeSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_and_access_group_map ON storage_pool.id = storage_pool_and_access_group_map.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.hypervisor = ? and storage_pool.data_center_id = ? and storage_pool.scope = ? and (";
private final String ZoneWideStorageAccessGroupsWithHypervisorTypeSqlSuffix = ") GROUP BY storage_pool_and_access_group_map.pool_id";
// Storage tags are now separate from storage_pool_details, leaving only details on that table
protected final String TagsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_tags ON storage_pool.id = storage_pool_tags.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' AND storage_pool_tags.is_tag_a_rule = 0 and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and storage_pool.scope = ? and (";
protected final String TagsSqlSuffix = ") GROUP BY storage_pool_tags.pool_id HAVING COUNT(storage_pool_tags.tag) >= ?";
protected final String SAGsForHostConnectionSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_and_access_group_map ON storage_pool.id = storage_pool_and_access_group_map.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and storage_pool.scope = ? and (";
protected final String SAGsForHostConnectionSqlSuffix = ") GROUP BY storage_pool_and_access_group_map.pool_id";
private static final String GET_STORAGE_POOLS_OF_VOLUMES_WITHOUT_OR_NOT_HAVING_TAGS = "SELECT s.* " +
"FROM volumes vol " +
@ -296,13 +308,13 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
}
@Override
public StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags, Boolean isTagARule) {
return persist(pool, details, tags, isTagARule, true);
public StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags, Boolean isTagARule, List<String> storageAccessGroups) {
return persist(pool, details, tags, isTagARule, true, storageAccessGroups);
}
@Override
@DB
public StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags, Boolean isTagARule, boolean displayDetails) {
public StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags, Boolean isTagARule, boolean displayDetails, List<String> storageAccessGroups) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
txn.start();
pool = super.persist(pool);
@ -315,6 +327,9 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
if (CollectionUtils.isNotEmpty(tags)) {
_tagsDao.persist(pool.getId(), tags, isTagARule);
}
if (CollectionUtils.isNotEmpty(storageAccessGroups)) {
_storagePoolAccessGroupMapDao.persist(pool.getId(), storageAccessGroups);
}
txn.commit();
return pool;
}
@ -338,6 +353,13 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
return searchStoragePoolsPreparedStatement(sql, dcId, podId, clusterId, scope, valuesLength);
}
protected List<StoragePoolVO> findPoolsByDetailsOrTagsForHostConnectionInternal(long dcId, long podId, Long clusterId, ScopeType scope, String sqlValues, ValueType valuesType) {
String sqlPrefix = valuesType.equals(ValueType.DETAILS) ? DetailsSqlPrefix : SAGsForHostConnectionSqlPrefix;
String sqlSuffix = valuesType.equals(ValueType.DETAILS) ? DetailsForHostConnectionSqlSuffix : SAGsForHostConnectionSqlSuffix;
String sql = getSqlPreparedStatement(sqlPrefix, sqlSuffix, sqlValues, clusterId);
return searchStoragePoolsPreparedStatement(sql, dcId, podId, clusterId, scope, null);
}
/**
* Search storage pools in a transaction
* @param sql prepared statement sql
@ -349,7 +371,50 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
* @return storage pools matching criteria
*/
@DB
protected List<StoragePoolVO> searchStoragePoolsPreparedStatement(String sql, long dcId, Long podId, Long clusterId, ScopeType scope, int valuesLength) {
protected List<StoragePoolVO> searchStoragePoolsWithHypervisorTypesPreparedStatement(String sql, HypervisorType type, long dcId, Long podId, Long clusterId, ScopeType scope, Integer valuesLength) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
List<StoragePoolVO> pools = new ArrayList<StoragePoolVO>();
try (PreparedStatement pstmt = txn.prepareStatement(sql);) {
if (pstmt != null) {
int i = 1;
pstmt.setString(i++, type.toString());
pstmt.setLong(i++, dcId);
if (podId != null) {
pstmt.setLong(i++, podId);
}
pstmt.setString(i++, scope.toString());
if (clusterId != null) {
pstmt.setLong(i++, clusterId);
}
if (valuesLength != null) {
pstmt.setInt(i++, valuesLength);
}
try (ResultSet rs = pstmt.executeQuery();) {
while (rs.next()) {
pools.add(toEntityBean(rs, false));
}
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to execute :" + e.getMessage(), e);
}
}
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to execute :" + e.getMessage(), e);
}
return pools;
}
/**
* Search storage pools in a transaction
* @param sql prepared statement sql
* @param dcId data center id
* @param podId pod id
* @param clusterId cluster id
* @param scope scope
* @param valuesLength values length
* @return storage pools matching criteria
*/
@DB
protected List<StoragePoolVO> searchStoragePoolsPreparedStatement(String sql, long dcId, Long podId, Long clusterId, ScopeType scope, Integer valuesLength) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
List<StoragePoolVO> pools = new ArrayList<StoragePoolVO>();
try (PreparedStatement pstmt = txn.prepareStatement(sql);) {
@ -363,7 +428,9 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
if (clusterId != null) {
pstmt.setLong(i++, clusterId);
}
pstmt.setInt(i++, valuesLength);
if (valuesLength != null) {
pstmt.setInt(i++, valuesLength);
}
try (ResultSet rs = pstmt.executeQuery();) {
while (rs.next()) {
pools.add(toEntityBean(rs, false));
@ -420,6 +487,22 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
return sqlValues.toString();
}
/**
* Return SQL string from the storage pool access group map, to be placed between the SQL prefix and SQL suffix when creating the storage access groups PreparedStatement.
* @param storageAccessGroups storage access group array
* @return SQL string containing storage access group values to be placed between the prefix and suffix when creating the PreparedStatement.
* @throws NullPointerException if storageAccessGroups is null
* @throws IndexOutOfBoundsException if storageAccessGroups is not null, but empty
*/
protected String getSqlValuesFromStorageAccessGroups(String[] storageAccessGroups) throws NullPointerException, IndexOutOfBoundsException {
StringBuilder sqlValues = new StringBuilder();
for (String group : storageAccessGroups) {
sqlValues.append("(storage_pool_and_access_group_map.storage_access_group='").append(group).append("') OR ");
}
sqlValues.delete(sqlValues.length() - 4, sqlValues.length());
return sqlValues.toString();
}
@DB
@Override
public List<StoragePoolVO> findPoolsByDetails(long dcId, long podId, Long clusterId, Map<String, String> details, ScopeType scope) {
@ -428,10 +511,10 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
}
@Override
public List<StoragePoolVO> findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags, boolean validateTagRule, long ruleExecuteTimeout) {
public List<StoragePoolVO> findPoolsByTags(long dcId, long podId, Long clusterId, ScopeType scope, String[] tags, boolean validateTagRule, long ruleExecuteTimeout) {
List<StoragePoolVO> storagePools = null;
if (tags == null || tags.length == 0) {
storagePools = listBy(dcId, podId, clusterId, ScopeType.CLUSTER);
storagePools = listBy(dcId, podId, clusterId, scope);
if (validateTagRule) {
storagePools = getPoolsWithoutTagRule(storagePools);
@ -439,7 +522,20 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
} else {
String sqlValues = getSqlValuesFromStorageTags(tags);
storagePools = findPoolsByDetailsOrTagsInternal(dcId, podId, clusterId, ScopeType.CLUSTER, sqlValues, ValueType.TAGS, tags.length);
storagePools = findPoolsByDetailsOrTagsInternal(dcId, podId, clusterId, scope, sqlValues, ValueType.TAGS, tags.length);
}
return storagePools;
}
@Override
public List<StoragePoolVO> findPoolsByAccessGroupsForHostConnection(Long dcId, Long podId, Long clusterId, ScopeType scope, String[] storageAccessGroups) {
List<StoragePoolVO> storagePools = null;
if (storageAccessGroups == null || storageAccessGroups.length == 0) {
storagePools = listBy(dcId, podId, clusterId, scope);
} else {
String sqlValues = getSqlValuesFromStorageAccessGroups(storageAccessGroups);
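// Note: ValueType.TAGS here only selects the non-DETAILS branch of the internal helper,
// which maps to the storage access group SQL prefix/suffix defined above.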
storagePools = findPoolsByDetailsOrTagsForHostConnectionInternal(dcId, podId, clusterId, scope, sqlValues, ValueType.TAGS);
}
return storagePools;
@ -556,6 +652,77 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
return storagePoolsToReturn;
}
@Override
public List<StoragePoolVO> findZoneWideStoragePoolsByAccessGroupsForHostConnection(long dcId, String[] storageAccessGroups) {
if (storageAccessGroups == null || storageAccessGroups.length == 0) {
QueryBuilder<StoragePoolVO> sc = QueryBuilder.create(StoragePoolVO.class);
sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId);
sc.and(sc.entity().getStatus(), Op.EQ, Status.Up);
sc.and(sc.entity().getScope(), Op.EQ, ScopeType.ZONE);
return sc.list();
} else {
String sqlValues = getSqlValuesFromStorageAccessGroups(storageAccessGroups);
String sql = getSqlPreparedStatement(ZoneWideStorageAccessGroupsForHostConnectionSqlPrefix, ZoneWideStorageAccessGroupsForHostConnectionSqlSuffix, sqlValues, null);
return searchStoragePoolsPreparedStatement(sql, dcId, null, null, ScopeType.ZONE, null);
}
}
@Override
public List<StoragePoolVO> findZoneWideStoragePoolsByAccessGroupsAndHypervisorTypeForHostConnection(long dcId, String[] storageAccessGroups, HypervisorType type) {
if (storageAccessGroups == null || storageAccessGroups.length == 0) {
QueryBuilder<StoragePoolVO> sc = QueryBuilder.create(StoragePoolVO.class);
sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId);
sc.and(sc.entity().getStatus(), Op.EQ, Status.Up);
sc.and(sc.entity().getScope(), Op.EQ, ScopeType.ZONE);
sc.and(sc.entity().getHypervisor(), Op.EQ, type);
return sc.list();
} else {
String sqlValues = getSqlValuesFromStorageAccessGroups(storageAccessGroups);
String sql = getSqlPreparedStatement(ZoneWideStorageAccessGroupsWithHypervisorTypeSqlPrefix, ZoneWideStorageAccessGroupsWithHypervisorTypeSqlSuffix, sqlValues, null);
return searchStoragePoolsWithHypervisorTypesPreparedStatement(sql, type, dcId, null, null, ScopeType.ZONE, null);
}
}
@Override
public List<StoragePoolVO> findStoragePoolsByEmptyStorageAccessGroups(Long dcId, Long podId, Long clusterId, ScopeType scope, HypervisorType hypervisorType) {
SearchBuilder<StoragePoolVO> poolSearch = createSearchBuilder();
SearchBuilder<StoragePoolAndAccessGroupMapVO> storageAccessGroupsPoolSearch = _storagePoolAccessGroupMapDao.createSearchBuilder();
// Set criteria for pools
poolSearch.and("scope", poolSearch.entity().getScope(), Op.EQ);
poolSearch.and("removed", poolSearch.entity().getRemoved(), Op.NULL);
poolSearch.and("status", poolSearch.entity().getStatus(), Op.EQ);
poolSearch.and("datacenterid", poolSearch.entity().getDataCenterId(), Op.EQ);
poolSearch.and("podid", poolSearch.entity().getPodId(), Op.EQ);
poolSearch.and("clusterid", poolSearch.entity().getClusterId(), Op.EQ);
poolSearch.and("hypervisortype", poolSearch.entity().getHypervisor(), Op.EQ);
// Require StoragePoolAndAccessGroupMapVO.pool_id IS NULL. This ensures only pools without any storage access group are returned
storageAccessGroupsPoolSearch.and("poolid", storageAccessGroupsPoolSearch.entity().getPoolId(), Op.NULL);
poolSearch.join("sagJoin", storageAccessGroupsPoolSearch, poolSearch.entity().getId(), storageAccessGroupsPoolSearch.entity().getPoolId(), JoinBuilder.JoinType.LEFT);
SearchCriteria<StoragePoolVO> sc = poolSearch.create();
sc.setParameters("scope", scope.toString());
sc.setParameters("status", Status.Up.toString());
if (dcId != null) {
sc.setParameters("datacenterid", dcId);
}
if (podId != null) {
sc.setParameters("podid", podId);
}
if (clusterId != null) {
sc.setParameters("clusterid", clusterId);
}
if (hypervisorType != null) {
sc.setParameters("hypervisortype", hypervisorType);
}
return listBy(sc);
}
@Override
public List<String> searchForStoragePoolTags(long poolId) {
return _tagsDao.getStoragePoolTags(poolId);
@ -659,6 +826,11 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
_tagsDao.deleteTags(poolId);
}
@Override
public void deleteStoragePoolAccessGroups(long poolId) {
_storagePoolAccessGroupMapDao.deleteStorageAccessGroups(poolId);
}
@Override
public List<StoragePoolVO> listChildStoragePoolsInDatastoreCluster(long poolId) {
QueryBuilder<StoragePoolVO> sc = QueryBuilder.create(StoragePoolVO.class);
@ -725,9 +897,10 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
@Override
public Pair<List<Long>, Integer> searchForIdsAndCount(Long storagePoolId, String storagePoolName, Long zoneId,
String path, Long podId, Long clusterId, String address, ScopeType scopeType, StoragePoolStatus status,
String keyword, Filter searchFilter) {
SearchCriteria<StoragePoolVO> sc = createStoragePoolSearchCriteria(storagePoolId, storagePoolName, zoneId, path, podId, clusterId, address, scopeType, status, keyword);
String path, Long podId, Long clusterId, Long hostId, String address, ScopeType scopeType, StoragePoolStatus status,
String keyword, String storageAccessGroup, Filter searchFilter) {
SearchCriteria<StoragePoolVO> sc = createStoragePoolSearchCriteria(storagePoolId, storagePoolName, zoneId, path, podId, clusterId,
hostId, address, scopeType, status, keyword, storageAccessGroup);
Pair<List<StoragePoolVO>, Integer> uniquePair = searchAndCount(sc, searchFilter);
List<Long> idList = uniquePair.first().stream().map(StoragePoolVO::getId).collect(Collectors.toList());
return new Pair<>(idList, uniquePair.second());
@ -744,8 +917,8 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
}
private SearchCriteria<StoragePoolVO> createStoragePoolSearchCriteria(Long storagePoolId, String storagePoolName,
Long zoneId, String path, Long podId, Long clusterId, String address, ScopeType scopeType,
StoragePoolStatus status, String keyword) {
Long zoneId, String path, Long podId, Long clusterId, Long hostId, String address, ScopeType scopeType,
StoragePoolStatus status, String keyword, String storageAccessGroup) {
SearchBuilder<StoragePoolVO> sb = createSearchBuilder();
sb.select(null, SearchCriteria.Func.DISTINCT, sb.entity().getId()); // select distinct
// ids
@ -760,6 +933,18 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
sb.and("status", sb.entity().getStatus(), SearchCriteria.Op.EQ);
sb.and("parent", sb.entity().getParent(), SearchCriteria.Op.EQ);
if (hostId != null) {
SearchBuilder<StoragePoolHostVO> hostJoin = _hostDao.createSearchBuilder();
hostJoin.and("hostId", hostJoin.entity().getHostId(), SearchCriteria.Op.EQ);
sb.join("poolHostJoin", hostJoin, sb.entity().getId(), hostJoin.entity().getPoolId(), JoinBuilder.JoinType.INNER);
}
if (storageAccessGroup != null) {
SearchBuilder<StoragePoolAndAccessGroupMapVO> storageAccessGroupJoin = _storagePoolAccessGroupMapDao.createSearchBuilder();
storageAccessGroupJoin.and("storageAccessGroup", storageAccessGroupJoin.entity().getStorageAccessGroup(), SearchCriteria.Op.EQ);
sb.join("poolStorageAccessGroupJoin", storageAccessGroupJoin, sb.entity().getId(), storageAccessGroupJoin.entity().getPoolId(), JoinBuilder.JoinType.INNER);
}
SearchCriteria<StoragePoolVO> sc = sb.create();
if (keyword != null) {
@ -808,6 +993,15 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
sc.setParameters("status", status.toString());
}
sc.setParameters("parent", 0);
if (hostId != null) {
sc.setJoinParameters("poolHostJoin", "hostId", hostId);
}
if (storageAccessGroup != null) {
sc.setJoinParameters("poolStorageAccessGroupJoin", "storageAccessGroup", storageAccessGroup);
}
return sc;
}
}
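A hedged sketch of the extended searchForIdsAndCount signature (all values are hypothetical). Both new filters use inner joins, so a pool must be mounted on the given host (via the pool-host mapping) and mapped to the given access group to be returned:

// Illustrative only; zoneId, hostId and searchFilter are assumed to exist in the caller.
Pair<List<Long>, Integer> idsAndCount = primaryDataStoreDao.searchForIdsAndCount(
        null, null, zoneId,               // no pool id/name filter; restrict to one zone
        null, null, null, hostId,         // no path/pod/cluster filter; pools visible to this host
        null, null, StoragePoolStatus.Up, // no address/scope filter; only pools that are Up
        null, "rack1", searchFilter);     // no keyword; only pools in access group "rack1"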

View File

@ -62,6 +62,7 @@
<bean id="storagePoolDetailsDaoImpl" class="com.cloud.storage.dao.StoragePoolDetailsDaoImpl" />
<bean id="storagePoolHostDaoImpl" class="com.cloud.storage.dao.StoragePoolHostDaoImpl" />
<bean id="storagePoolTagsDaoImpl" class="com.cloud.storage.dao.StoragePoolTagsDaoImpl" />
<bean id="storagePoolAndAccessGroupMapDaoImpl" class="com.cloud.storage.dao.StoragePoolAndAccessGroupMapDaoImpl" />
<bean id="userVmDetailsDaoImpl" class="com.cloud.vm.dao.UserVmDetailsDaoImpl" />
<bean id="vGPUTypesDaoImpl" class="com.cloud.gpu.dao.VGPUTypesDaoImpl" />
<bean id="vMInstanceDaoImpl" class="com.cloud.vm.dao.VMInstanceDaoImpl" />

View File

@ -99,3 +99,18 @@ CREATE TABLE IF NOT EXISTS `cloud`.`reconcile_commands` (
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.snapshot_store_ref', 'kvm_checkpoint_path', 'varchar(255)');
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.snapshot_store_ref', 'end_of_chain', 'int(1) unsigned');
-- Create table storage_pool_and_access_group_map
CREATE TABLE IF NOT EXISTS `cloud`.`storage_pool_and_access_group_map` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`pool_id` bigint(20) unsigned NOT NULL COMMENT "pool id",
`storage_access_group` varchar(255) NOT NULL,
PRIMARY KEY (`id`),
KEY `fk_storage_pool_and_access_group_map__pool_id` (`pool_id`),
CONSTRAINT `fk_storage_pool_and_access_group_map__pool_id` FOREIGN KEY (`pool_id`) REFERENCES `storage_pool` (`id`) ON DELETE CASCADE
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8;
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.host', 'storage_access_groups', 'varchar(255) DEFAULT NULL COMMENT "storage access groups for the host"');
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.cluster', 'storage_access_groups', 'varchar(255) DEFAULT NULL COMMENT "storage access groups for the hosts in the cluster"');
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.host_pod_ref', 'storage_access_groups', 'varchar(255) DEFAULT NULL COMMENT "storage access groups for the hosts in the pod"');
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.data_center', 'storage_access_groups', 'varchar(255) DEFAULT NULL COMMENT "storage access groups for the hosts in the zone"');

View File

@ -42,6 +42,7 @@ select
data_center.type,
data_center.removed,
data_center.sort_key,
data_center.storage_access_groups,
domain.id domain_id,
domain.uuid domain_uuid,
domain.name domain_name,

View File

@ -42,17 +42,21 @@ SELECT
host.speed,
host.ram,
host.arch,
host.storage_access_groups,
cluster.id cluster_id,
cluster.uuid cluster_uuid,
cluster.name cluster_name,
cluster.cluster_type,
cluster.storage_access_groups AS cluster_storage_access_groups,
data_center.id data_center_id,
data_center.uuid data_center_uuid,
data_center.name data_center_name,
data_center.storage_access_groups AS zone_storage_access_groups,
data_center.networktype data_center_type,
host_pod_ref.id pod_id,
host_pod_ref.uuid pod_uuid,
host_pod_ref.name pod_name,
host_pod_ref.storage_access_groups AS pod_storage_access_groups,
GROUP_CONCAT(DISTINCT(host_tags.tag)) AS tag,
GROUP_CONCAT(DISTINCT(explicit_host_tags.tag)) AS explicit_tag,
GROUP_CONCAT(DISTINCT(implicit_host_tags.tag)) AS implicit_tag,

View File

@ -51,6 +51,7 @@ SELECT
`host_pod_ref`.`name` AS `pod_name`,
`storage_pool_tags`.`tag` AS `tag`,
`storage_pool_tags`.`is_tag_a_rule` AS `is_tag_a_rule`,
`storage_pool_and_access_group_map`.`storage_access_group` AS `storage_access_group`,
`op_host_capacity`.`used_capacity` AS `disk_used_capacity`,
`op_host_capacity`.`reserved_capacity` AS `disk_reserved_capacity`,
`async_job`.`id` AS `job_id`,
@ -58,13 +59,16 @@ SELECT
`async_job`.`job_status` AS `job_status`,
`async_job`.`account_id` AS `job_account_id`
FROM
((((((`cloud`.`storage_pool`
LEFT JOIN `cloud`.`cluster` ON ((`storage_pool`.`cluster_id` = `cluster`.`id`)))
LEFT JOIN `cloud`.`data_center` ON ((`storage_pool`.`data_center_id` = `data_center`.`id`)))
LEFT JOIN `cloud`.`host_pod_ref` ON ((`storage_pool`.`pod_id` = `host_pod_ref`.`id`)))
LEFT JOIN `cloud`.`storage_pool_tags` ON (((`storage_pool_tags`.`pool_id` = `storage_pool`.`id`))))
LEFT JOIN `cloud`.`op_host_capacity` ON (((`storage_pool`.`id` = `op_host_capacity`.`host_id`)
AND (`op_host_capacity`.`capacity_type` IN (3 , 9)))))
LEFT JOIN `cloud`.`async_job` ON (((`async_job`.`instance_id` = `storage_pool`.`id`)
AND (`async_job`.`instance_type` = 'StoragePool')
AND (`async_job`.`job_status` = 0))));
`cloud`.`storage_pool`
LEFT JOIN `cloud`.`cluster` ON `storage_pool`.`cluster_id` = `cluster`.`id`
LEFT JOIN `cloud`.`data_center` ON `storage_pool`.`data_center_id` = `data_center`.`id`
LEFT JOIN `cloud`.`host_pod_ref` ON `storage_pool`.`pod_id` = `host_pod_ref`.`id`
LEFT JOIN `cloud`.`storage_pool_tags` ON `storage_pool_tags`.`pool_id` = `storage_pool`.`id`
LEFT JOIN `cloud`.`storage_pool_and_access_group_map` ON `storage_pool_and_access_group_map`.`pool_id` = `storage_pool`.`id`
LEFT JOIN `cloud`.`op_host_capacity`
ON `storage_pool`.`id` = `op_host_capacity`.`host_id`
AND `op_host_capacity`.`capacity_type` IN (3, 9)
LEFT JOIN `cloud`.`async_job`
ON `async_job`.`instance_id` = `storage_pool`.`id`
AND `async_job`.`instance_type` = 'StoragePool'
AND `async_job`.`job_status` = 0;

View File

@ -35,6 +35,7 @@ import javax.inject.Inject;
import com.cloud.agent.api.CheckVirtualMachineAnswer;
import com.cloud.agent.api.CheckVirtualMachineCommand;
import com.cloud.agent.api.PrepareForMigrationAnswer;
import com.cloud.resource.ResourceManager;
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
@ -51,6 +52,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreState
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.StorageAction;
@ -199,6 +201,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
VMTemplatePoolDao templatePoolDao;
@Inject
private VolumeDataFactory _volFactory;
@Inject
ResourceManager resourceManager;
@Override
public StrategyPriority canHandle(DataObject srcData, DataObject destData) {
@ -485,10 +489,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
HostVO hostVO;
if (srcStoragePoolVO.getClusterId() != null) {
hostVO = getHostInCluster(srcStoragePoolVO.getClusterId());
hostVO = getHostInCluster(srcStoragePoolVO);
}
else {
hostVO = getHost(srcVolumeInfo.getDataCenterId(), hypervisorType, false);
hostVO = getHost(srcVolumeInfo, hypervisorType, false);
}
volumePath = copyManagedVolumeToSecondaryStorage(srcVolumeInfo, destVolumeInfo, hostVO,
@ -556,10 +560,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
HostVO hostVO;
if (destStoragePoolVO.getClusterId() != null) {
hostVO = getHostInCluster(destStoragePoolVO.getClusterId());
hostVO = getHostInCluster(destStoragePoolVO);
}
else {
hostVO = getHost(destVolumeInfo.getDataCenterId(), hypervisorType, false);
hostVO = getHost(destVolumeInfo, hypervisorType, false);
}
setCertainVolumeValuesNull(destVolumeInfo.getId());
@ -933,9 +937,9 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
hostVO = _hostDao.findById(destVolumeInfo.getDataStore().getScope().getScopeId());
} else {
if (srcStoragePoolVO.getClusterId() != null) {
hostVO = getHostInCluster(srcStoragePoolVO.getClusterId());
hostVO = getHostInCluster(srcStoragePoolVO);
} else {
hostVO = getHost(destVolumeInfo.getDataCenterId(), HypervisorType.KVM, false);
hostVO = getHost(destVolumeInfo, HypervisorType.KVM, false);
}
}
@ -1337,7 +1341,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
createVolumeFromSnapshot(snapshotInfo);
HostVO hostVO = getHost(snapshotInfo.getDataCenterId(), HypervisorType.XenServer, true);
HostVO hostVO = getHost(snapshotInfo, HypervisorType.XenServer, true);
copyCmdAnswer = performResignature(snapshotInfo, hostVO, null, true);
@ -1349,7 +1353,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
CopyCommand copyCommand = new CopyCommand(snapshotInfo.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait,
VirtualMachineManager.ExecuteInSequence.value());
HostVO hostVO = getHostInCluster(volumeStoragePoolVO.getClusterId());
HostVO hostVO = getHostInCluster(volumeStoragePoolVO);
if (!usingBackendSnapshot) {
long snapshotStoragePoolId = snapshotInfo.getDataStore().getId();
@ -1379,7 +1383,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
}
finally {
try {
HostVO hostVO = getHostInCluster(volumeStoragePoolVO.getClusterId());
HostVO hostVO = getHostInCluster(volumeStoragePoolVO);
long snapshotStoragePoolId = snapshotInfo.getDataStore().getId();
DataStore snapshotDataStore = dataStoreMgr.getDataStore(snapshotStoragePoolId, DataStoreRole.Primary);
@ -1473,7 +1477,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
handleQualityOfServiceForVolumeMigration(volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION);
hostVO = getHost(snapshotInfo.getDataCenterId(), snapshotInfo.getHypervisorType(), false);
hostVO = getHost(snapshotInfo, snapshotInfo.getHypervisorType(), false);
// copy the volume from secondary via the hypervisor
if (HypervisorType.XenServer.equals(snapshotInfo.getHypervisorType())) {
@ -1554,7 +1558,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
// only XenServer, VMware, and KVM are currently supported
// Leave host equal to null for KVM since we don't need to perform a resignature when using that hypervisor type.
if (volumeInfo.getFormat() == ImageFormat.VHD) {
hostVO = getHost(volumeInfo.getDataCenterId(), HypervisorType.XenServer, true);
hostVO = getHost(volumeInfo, HypervisorType.XenServer, true);
if (hostVO == null) {
throw new CloudRuntimeException("Unable to locate a host capable of resigning in the zone with the following ID: " +
@ -1574,7 +1578,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
}
else if (volumeInfo.getFormat() == ImageFormat.OVA) {
// all VMware hosts support resigning
hostVO = getHost(volumeInfo.getDataCenterId(), HypervisorType.VMware, false);
hostVO = getHost(volumeInfo, HypervisorType.VMware, false);
if (hostVO == null) {
throw new CloudRuntimeException("Unable to locate a host capable of resigning in the zone with the following ID: " +
@ -1757,7 +1761,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
} else {
// asking for a XenServer host here so we don't always prefer to use XenServer hosts that support resigning
// even when we don't need those hosts to do this kind of copy work
hostVO = getHost(snapshotInfo.getDataCenterId(), snapshotInfo.getHypervisorType(), false);
hostVO = getHost(snapshotInfo, snapshotInfo.getHypervisorType(), false);
handleQualityOfServiceForVolumeMigration(volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION);
@ -1814,7 +1818,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
HostVO hostVO = getHost(dataCenterId, hypervisorType, false);
HostVO hostVO = getHost(destVolumeInfo, hypervisorType, false);
handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION);
@ -2606,7 +2610,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
volumeInfo.processEvent(Event.MigrationRequested);
HostVO hostVO = getHost(volumeInfo.getDataCenterId(), HypervisorType.KVM, false);
HostVO hostVO = getHost(volumeInfo, HypervisorType.KVM, false);
DataStore srcDataStore = volumeInfo.getDataStore();
int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value();
@ -2764,10 +2768,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
HypervisorType hypervisorType = snapshotInfo.getHypervisorType();
if (HypervisorType.XenServer.equals(hypervisorType)) {
HostVO hostVO = getHost(snapshotInfo.getDataCenterId(), hypervisorType, true);
HostVO hostVO = getHost(snapshotInfo, hypervisorType, true);
if (hostVO == null) {
hostVO = getHost(snapshotInfo.getDataCenterId(), hypervisorType, false);
hostVO = getHost(snapshotInfo, hypervisorType, false);
if (hostVO == null) {
throw new CloudRuntimeException("Unable to locate an applicable host in data center with ID = " + snapshotInfo.getDataCenterId());
@ -2778,14 +2782,15 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
}
if (HypervisorType.VMware.equals(hypervisorType) || HypervisorType.KVM.equals(hypervisorType)) {
return getHost(snapshotInfo.getDataCenterId(), hypervisorType, false);
return getHost(snapshotInfo, hypervisorType, false);
}
throw new CloudRuntimeException("Unsupported hypervisor type");
}
private HostVO getHostInCluster(long clusterId) {
List<HostVO> hosts = _hostDao.findByClusterId(clusterId);
private HostVO getHostInCluster(StoragePoolVO storagePool) {
DataStore store = dataStoreMgr.getDataStore(storagePool.getId(), DataStoreRole.Primary);
List<HostVO> hosts = resourceManager.getEligibleUpAndEnabledHostsInClusterForStorageConnection((PrimaryDataStoreInfo) store);
if (hosts != null && hosts.size() > 0) {
Collections.shuffle(hosts, RANDOM);
@ -2800,12 +2805,37 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
throw new CloudRuntimeException("Unable to locate a host");
}
private HostVO getHost(Long zoneId, HypervisorType hypervisorType, boolean computeClusterMustSupportResign) {
private HostVO getHost(SnapshotInfo snapshotInfo, HypervisorType hypervisorType, boolean computeClusterMustSupportResign) {
Long zoneId = snapshotInfo.getDataCenterId();
Preconditions.checkArgument(zoneId != null, "Zone ID cannot be null.");
Preconditions.checkArgument(hypervisorType != null, "Hypervisor type cannot be null.");
List<HostVO> hosts = _hostDao.listByDataCenterIdAndHypervisorType(zoneId, hypervisorType);
List<HostVO> hosts;
if (DataStoreRole.Primary.equals(snapshotInfo.getDataStore().getRole())) {
hosts = resourceManager.getEligibleUpAndEnabledHostsInZoneForStorageConnection(snapshotInfo.getDataStore(), zoneId, hypervisorType);
} else {
hosts = _hostDao.listByDataCenterIdAndHypervisorType(zoneId, hypervisorType);
}
return getHost(hosts, computeClusterMustSupportResign);
}
private HostVO getHost(VolumeInfo volumeInfo, HypervisorType hypervisorType, boolean computeClusterMustSupportResign) {
Long zoneId = volumeInfo.getDataCenterId();
Preconditions.checkArgument(zoneId != null, "Zone ID cannot be null.");
Preconditions.checkArgument(hypervisorType != null, "Hypervisor type cannot be null.");
List<HostVO> hosts;
if (DataStoreRole.Primary.equals(volumeInfo.getDataStore().getRole())) {
hosts = resourceManager.getEligibleUpAndEnabledHostsInZoneForStorageConnection(volumeInfo.getDataStore(), zoneId, hypervisorType);
} else {
hosts = _hostDao.listByDataCenterIdAndHypervisorType(zoneId, hypervisorType);
}
return getHost(hosts, computeClusterMustSupportResign);
}
private HostVO getHost(List<HostVO> hosts, boolean computeClusterMustSupportResign) {
if (hosts == null) {
return null;
}
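Side note (not part of this commit): the SnapshotInfo and VolumeInfo overloads above duplicate the same primary-vs-secondary branch. A minimal consolidation sketch, assuming callers pass the zone id and data store extracted from either info type; the helper name is hypothetical:
// Hedged sketch only: a shared helper both overloads could delegate to.
private HostVO getHostForStorageConnection(Long zoneId, DataStore dataStore, HypervisorType hypervisorType, boolean computeClusterMustSupportResign) {
Preconditions.checkArgument(zoneId != null, "Zone ID cannot be null.");
Preconditions.checkArgument(hypervisorType != null, "Hypervisor type cannot be null.");
List<HostVO> hosts;
if (DataStoreRole.Primary.equals(dataStore.getRole())) {
// Primary storage: restrict to hosts eligible under the pool's storage access groups.
hosts = resourceManager.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, zoneId, hypervisorType);
} else {
hosts = _hostDao.listByDataCenterIdAndHypervisorType(zoneId, hypervisorType);
}
return getHost(hosts, computeClusterMustSupportResign);
}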

View File

@ -17,41 +17,45 @@
package org.apache.cloudstack.storage.allocator;
import com.cloud.api.query.dao.StoragePoolJoinDao;
import com.cloud.dc.dao.HostPodDao;
import com.cloud.exception.StorageUnavailableException;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.storage.ScopeType;
import com.cloud.storage.StoragePoolStatus;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.commons.lang3.StringUtils;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import org.apache.commons.collections.CollectionUtils;
import com.cloud.utils.Pair;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import com.cloud.capacity.Capacity;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.exception.StorageUnavailableException;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.storage.StorageUtil;
import com.cloud.storage.Volume;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.Account;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.Pair;
import com.cloud.utils.StringUtils;
import com.cloud.utils.component.AdapterBase;
import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachineProfile;
import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import org.apache.commons.collections.CollectionUtils;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
@ -77,11 +81,15 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
@Inject protected PrimaryDataStoreDao storagePoolDao;
@Inject protected VolumeDao volumeDao;
@Inject protected ConfigurationDao configDao;
@Inject protected ClusterDao clusterDao;
@Inject protected CapacityDao capacityDao;
@Inject private ClusterDao clusterDao;
@Inject private StorageManager storageMgr;
@Inject private StorageUtil storageUtil;
@Inject private StoragePoolDetailsDao storagePoolDetailsDao;
@Inject
protected HostDao hostDao;
@Inject
protected HostPodDao podDao;
/**
* make sure shuffled lists of Pools are really shuffled
@ -320,6 +328,16 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
return false;
}
if (plan.getHostId() != null) {
HostVO plannedHost = hostDao.findById(plan.getHostId());
if (!storageMgr.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(plannedHost, pool)) {
if (logger.isDebugEnabled()) {
logger.debug(String.format("StoragePool %s and host %s does not have matching storage access groups", pool, plannedHost));
}
return false;
}
}
Volume volume = null;
boolean isTempVolume = dskCh.getVolumeId() == Volume.DISK_OFFERING_SUITABILITY_CHECK_VOLUME_ID;
if (!isTempVolume) {
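For context, a minimal sketch of what the common-access-group check used above could look like; the actual StorageManagerImpl logic may differ. It assumes groups are stored as comma-separated strings (as Host.getStorageAccessGroups() suggests) and that a pool with no groups remains visible to every host:
// Hedged illustration only; not the actual StorageManagerImpl code.
static boolean hasCommonStorageAccessGroup(String hostGroupsCsv, String poolGroupsCsv) {
if (org.apache.commons.lang3.StringUtils.isBlank(poolGroupsCsv)) {
return true; // assumption: an untagged pool is open to all hosts
}
if (org.apache.commons.lang3.StringUtils.isBlank(hostGroupsCsv)) {
return false;
}
java.util.Set<String> hostGroups = java.util.Arrays.stream(hostGroupsCsv.split(","))
.map(String::trim).filter(s -> !s.isEmpty()).collect(java.util.stream.Collectors.toSet());
return java.util.Arrays.stream(poolGroupsCsv.split(","))
.map(String::trim).anyMatch(hostGroups::contains);
}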

View File

@ -77,12 +77,12 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
logDisabledStoragePools(dcId, podId, clusterId, ScopeType.CLUSTER);
}
List<StoragePoolVO> pools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, dskCh.getTags(), true, VolumeApiServiceImpl.storageTagRuleExecutionTimeout.value());
List<StoragePoolVO> pools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, ScopeType.CLUSTER, dskCh.getTags(), true, VolumeApiServiceImpl.storageTagRuleExecutionTimeout.value());
pools.addAll(storagePoolJoinDao.findStoragePoolByScopeAndRuleTags(dcId, podId, clusterId, ScopeType.CLUSTER, List.of(dskCh.getTags())));
logger.debug(String.format("Found pools [%s] that match with tags [%s].", pools, Arrays.toString(dskCh.getTags())));
// add the remaining pools in the cluster that did not match tags to the avoid set
List<StoragePoolVO> allPools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, null, false, 0);
List<StoragePoolVO> allPools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, ScopeType.CLUSTER, null, false, 0);
allPools.removeAll(pools);
for (StoragePoolVO pool : allPools) {
logger.trace(String.format("Adding pool [%s] to the 'avoid' set since it did not match any tags.", pool));
@ -100,7 +100,7 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
}
StoragePool storagePool = (StoragePool)dataStoreMgr.getPrimaryDataStore(pool.getId());
if (filter(avoid, storagePool, dskCh, plan)) {
logger.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", pool, dskCh));
logger.debug(String.format("Found suitable cluster storage pool [%s] to allocate disk [%s] to it, adding to list.", pool, dskCh));
suitablePools.add(storagePool);
} else {
logger.debug(String.format("Adding storage pool [%s] to avoid set during allocation of disk [%s].", pool, dskCh));

View File

@ -96,7 +96,7 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
}
StoragePool storagePool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(storage.getId());
if (filter(avoid, storagePool, dskCh, plan)) {
logger.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", storagePool, dskCh));
logger.debug(String.format("Found suitable zone wide storage pool [%s] to allocate disk [%s] to it, adding to list.", storagePool, dskCh));
suitablePools.add(storagePool);
} else {
if (canAddStoragePoolToAvoidSet(storage)) {

View File

@ -80,7 +80,7 @@ public class DefaultEndPointSelector implements EndPointSelector {
private final String findOneHostOnPrimaryStorage = "select t.id from "
+ "(select h.id, cd.value, hd.value as " + VOL_ENCRYPT_COLUMN_NAME + " "
+ "from host h join storage_pool_host_ref s on h.id = s.host_id "
+ "join cluster c on c.id=h.cluster_id "
+ "join cluster c on c.id=h.cluster_id and c.allocation_state = 'Enabled'"
+ "left join cluster_details cd on c.id=cd.cluster_id and cd.name='" + CapacityManager.StorageOperationsExcludeCluster.key() + "' "
+ "left join host_details hd on h.id=hd.host_id and hd.name='" + HOST_VOLUME_ENCRYPTION + "' "
+ "where h.status = 'Up' and h.type = 'Routing' and h.resource_state = 'Enabled' and s.pool_id = ? ";

View File

@ -159,7 +159,23 @@ public class PrimaryDataStoreHelper {
}
}
dataStoreVO = dataStoreDao.persist(dataStoreVO, details, storageTags, params.isTagARule(), displayDetails);
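// Storage access groups arrive as one comma-separated string; normalize into a trimmed, non-empty list before persisting.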
String storageAccessGroupsParams = params.getStorageAccessGroups();
List<String> storageAccessGroupsList = new ArrayList<String>();
if (storageAccessGroupsParams != null) {
String[] storageAccessGroups = storageAccessGroupsParams.split(",");
for (String storageAccessGroup : storageAccessGroups) {
storageAccessGroup = storageAccessGroup.trim();
if (storageAccessGroup.length() == 0) {
continue;
}
storageAccessGroupsList.add(storageAccessGroup);
}
}
dataStoreVO = dataStoreDao.persist(dataStoreVO, details, storageTags, params.isTagARule(), displayDetails, storageAccessGroupsList);
return dataStoreMgr.getDataStore(dataStoreVO.getId(), DataStoreRole.Primary);
}
@ -278,6 +294,7 @@ public class PrimaryDataStoreHelper {
this.dataStoreDao.update(poolVO.getId(), poolVO);
dataStoreDao.remove(poolVO.getId());
dataStoreDao.deletePoolTags(poolVO.getId());
dataStoreDao.deleteStoragePoolAccessGroups(poolVO.getId());
annotationDao.removeByEntityType(AnnotationService.EntityType.PRIMARY_STORAGE.name(), poolVO.getUuid());
deletePoolStats(poolVO.getId());
// Delete op_host_capacity entries

View File

@ -21,6 +21,7 @@ package org.apache.cloudstack.storage.datastore.provider;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.CleanupPersistentNetworkResourceCommand;
import com.cloud.agent.api.DeleteStoragePoolCommand;
import com.cloud.agent.api.ModifyStoragePoolAnswer;
import com.cloud.agent.api.ModifyStoragePoolCommand;
import com.cloud.agent.api.SetupPersistentNetworkCommand;
@ -45,6 +46,7 @@ import com.cloud.storage.StorageService;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
@ -207,8 +209,41 @@ public class DefaultHostListener implements HypervisorHostListener {
@Override
public boolean hostDisconnected(long hostId, long poolId) {
// TODO Auto-generated method stub
return false;
HostVO host = hostDao.findById(hostId);
if (host == null) {
logger.error("Failed to disconnect host by HostListener as host was not found with id : " + hostId);
return false;
}
DataStore dataStore = dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary);
StoragePool storagePool = (StoragePool) dataStore;
DeleteStoragePoolCommand cmd = new DeleteStoragePoolCommand(storagePool);
Answer answer = sendDeleteStoragePoolCommand(cmd, storagePool, host);
if (!answer.getResult()) {
logger.error("Failed to disconnect storage pool: " + storagePool + " and host: " + host);
return false;
}
StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId);
if (storagePoolHost != null) {
storagePoolHostDao.deleteStoragePoolHostDetails(hostId, poolId);
}
logger.info("Connection removed between storage pool: " + storagePool + " and host: " + host);
return true;
}
private Answer sendDeleteStoragePoolCommand(DeleteStoragePoolCommand cmd, StoragePool storagePool, HostVO host) {
Answer answer = agentMgr.easySend(host.getId(), cmd);
if (answer == null) {
throw new CloudRuntimeException(String.format("Unable to get an answer to the delete storage pool command for storage pool %s, sent to host %s", storagePool, host));
}
if (!answer.getResult()) {
String msg = "Unable to detach storage pool " + storagePool + " from the host " + host;
alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg);
}
return answer;
}
@Override

View File

@ -328,6 +328,11 @@ public class VolumeServiceImpl implements VolumeService {
} else {
vo.processEvent(Event.OperationFailed);
errMsg = result.getResult();
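// On failure, detach a still-Allocated volume from the selected pool so a retry can allocate elsewhere.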
VolumeVO volume = volDao.findById(vo.getId());
if (volume != null && volume.getState() == State.Allocated && volume.getPodId() != null) {
volume.setPoolId(null);
volDao.update(volume.getId(), volume);
}
}
VolumeApiResult volResult = new VolumeApiResult((VolumeObject)vo);
if (errMsg != null) {
@ -1255,6 +1260,10 @@ public class VolumeServiceImpl implements VolumeService {
}
if (volume.getState() == State.Allocated) { // Possible states here: Allocated, Ready & Creating
if (volume.getPodId() != null) {
volume.setPoolId(null);
volDao.update(volume.getId(), volume);
}
return;
}
@ -2494,7 +2503,7 @@ public class VolumeServiceImpl implements VolumeService {
try {
volume.processEvent(Event.ResizeRequested);
} catch (Exception e) {
logger.debug("Failed to change state to resize", e);
logger.debug("Failed to change volume state to resize", e);
result.setResult(e.toString());
future.complete(result);
return future;
@ -2506,10 +2515,8 @@ public class VolumeServiceImpl implements VolumeService {
try {
volume.getDataStore().getDriver().resize(volume, caller);
} catch (Exception e) {
logger.debug("Failed to change state to resize", e);
logger.debug("Failed to resize volume", e);
result.setResult(e.toString());
future.complete(result);
}
@ -2553,7 +2560,7 @@ public class VolumeServiceImpl implements VolumeService {
try {
volume.processEvent(Event.OperationFailed);
} catch (Exception e) {
logger.debug("Failed to change state", e);
logger.debug("Failed to change volume state (after resize failure)", e);
}
VolumeApiResult res = new VolumeApiResult(volume);
res.setResult(result.getResult());
@ -2564,13 +2571,8 @@ public class VolumeServiceImpl implements VolumeService {
try {
volume.processEvent(Event.OperationSuccessed);
} catch (Exception e) {
logger.debug("Failed to change state", e);
VolumeApiResult res = new VolumeApiResult(volume);
res.setResult(result.getResult());
future.complete(res);
return null;
logger.debug("Failed to change volume state (after resize success)", e);
}
VolumeApiResult res = new VolumeApiResult(volume);
future.complete(res);

View File

@ -25,6 +25,7 @@ import com.cloud.host.Status;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
@ -278,6 +279,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co
restoredVolume.setPoolId(dataStore.getPoolId());
restoredVolume.setPath(restoredVolume.getUuid());
restoredVolume.setState(Volume.State.Copying);
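// The restored volume is a qcow2 file on the KVM pool; record the format explicitly so later attach paths treat it correctly.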
restoredVolume.setFormat(Storage.ImageFormat.QCOW2);
restoredVolume.setSize(backedUpVolumeSize);
restoredVolume.setDiskOfferingId(volume.getDiskOfferingId());

View File

@ -45,7 +45,7 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper<RestoreBa
private static final String MOUNT_COMMAND = "sudo mount -t %s %s %s";
private static final String UMOUNT_COMMAND = "sudo umount %s";
private static final String FILE_PATH_PLACEHOLDER = "%s/%s";
private static final String ATTACH_DISK_COMMAND = " virsh attach-disk %s %s %s --cache none";
private static final String ATTACH_DISK_COMMAND = " virsh attach-disk %s %s %s --driver qemu --subdriver qcow2 --cache none";
private static final String CURRRENT_DEVICE = "virsh domblklist --domain %s | tail -n 3 | head -n 1 | awk '{print $1}'";
private static final String RSYNC_COMMAND = "rsync -az %s %s";

View File

@ -1609,7 +1609,9 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
} else {
destFile = new QemuImgFile(destPath, destFormat);
try {
qemu.convert(srcFile, destFile, null, null, new QemuImageOptions(srcFile.getFormat(), srcFile.getFileName(), null), null, false, true);
boolean isQCOW2 = PhysicalDiskFormat.QCOW2.equals(sourceFormat);
qemu.convert(srcFile, destFile, null, null, new QemuImageOptions(srcFile.getFormat(), srcFile.getFileName(), null),
null, false, isQCOW2);
Map<String, String> destInfo = qemu.info(destFile);
Long virtualSize = Long.parseLong(destInfo.get(QemuImg.VIRTUAL_SIZE));
newDisk.setVirtualSize(virtualSize);

View File

@ -43,6 +43,7 @@ import javax.inject.Inject;
import javax.naming.ConfigurationException;
import javax.persistence.EntityExistsException;
import com.cloud.hypervisor.vmware.mo.VirtualMachineMO;
import com.cloud.hypervisor.vmware.util.VmwareClient;
import org.apache.cloudstack.api.command.admin.zone.AddVmwareDcCmd;
import org.apache.cloudstack.api.command.admin.zone.ImportVsphereStoragePoliciesCmd;
@ -171,8 +172,11 @@ import com.cloud.vm.dao.VMInstanceDao;
import com.vmware.pbm.PbmProfile;
import com.vmware.vim25.AboutInfo;
import com.vmware.vim25.ManagedObjectReference;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
public class VmwareManagerImpl extends ManagerBase implements VmwareManager, VmwareStorageMount, Listener, VmwareDatacenterService, Configurable {
protected static Logger static_logger = LogManager.getLogger(VmwareManagerImpl.class);
private static final long SECONDS_PER_MINUTE = 60;
private static final int DEFAULT_PORTS_PER_DV_PORT_GROUP_VSPHERE4_x = 256;
@ -1585,14 +1589,26 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
return compatiblePools;
}
@Override
public List<UnmanagedInstanceTO> listVMsInDatacenter(ListVmwareDcVmsCmd cmd) {
private static class VcenterData {
public final String vcenter;
public final String datacenterName;
public final String username;
public final String password;
public VcenterData(String vcenter, String datacenterName, String username, String password) {
this.vcenter = vcenter;
this.datacenterName = datacenterName;
this.username = username;
this.password = password;
}
}
private VcenterData getVcenterData(ListVmwareDcVmsCmd cmd) {
String vcenter = cmd.getVcenter();
String datacenterName = cmd.getDatacenterName();
String username = cmd.getUsername();
String password = cmd.getPassword();
Long existingVcenterId = cmd.getExistingVcenterId();
String keyword = cmd.getKeyword();
if ((existingVcenterId == null && StringUtils.isBlank(vcenter)) ||
(existingVcenterId != null && StringUtils.isNotBlank(vcenter))) {
@ -1613,34 +1629,69 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
username = vmwareDc.getUser();
password = vmwareDc.getPassword();
}
VcenterData vmwaredc = new VcenterData(vcenter, datacenterName, username, password);
return vmwaredc;
}
private static VmwareContext getVmwareContext(String vcenter, String username, String password) throws Exception {
static_logger.debug(String.format("Connecting to the VMware vCenter %s", vcenter));
String serviceUrl = String.format("https://%s/sdk/vimService", vcenter);
VmwareClient vimClient = new VmwareClient(vcenter);
vimClient.connect(serviceUrl, username, password);
return new VmwareContext(vimClient, vcenter);
}
@Override
public List<UnmanagedInstanceTO> listVMsInDatacenter(ListVmwareDcVmsCmd cmd) {
VcenterData vmwareDC = getVcenterData(cmd);
String vcenter = vmwareDC.vcenter;
String username = vmwareDC.username;
String password = vmwareDC.password;
String datacenterName = vmwareDC.datacenterName;
String keyword = cmd.getKeyword();
String esxiHostName = cmd.getHostName();
String virtualMachineName = cmd.getInstanceName();
try {
logger.debug(String.format("Connecting to the VMware datacenter %s at vCenter %s to retrieve VMs",
datacenterName, vcenter));
String serviceUrl = String.format("https://%s/sdk/vimService", vcenter);
VmwareClient vimClient = new VmwareClient(vcenter);
vimClient.connect(serviceUrl, username, password);
VmwareContext context = new VmwareContext(vimClient, vcenter);
VmwareContext context = getVmwareContext(vcenter, username, password);
DatacenterMO dcMo = getDatacenterMO(context, vcenter, datacenterName);
DatacenterMO dcMo = new DatacenterMO(context, datacenterName);
ManagedObjectReference dcMor = dcMo.getMor();
if (dcMor == null) {
String msg = String.format("Unable to find VMware datacenter %s in vCenter %s",
datacenterName, vcenter);
logger.error(msg);
throw new InvalidParameterValueException(msg);
List<UnmanagedInstanceTO> instances;
if (StringUtils.isNotBlank(esxiHostName) && StringUtils.isNotBlank(virtualMachineName)) {
ManagedObjectReference hostMor = dcMo.findHost(esxiHostName);
if (hostMor == null) {
String errorMsg = String.format("Cannot find a host with name %s on vcenter %s", esxiHostName, vcenter);
logger.error(errorMsg);
throw new CloudRuntimeException(errorMsg);
}
HostMO hostMO = new HostMO(context, hostMor);
VirtualMachineMO vmMo = hostMO.findVmOnHyperHost(virtualMachineName);
instances = Collections.singletonList(VmwareHelper.getUnmanagedInstance(hostMO, vmMo));
} else {
instances = dcMo.getAllVmsOnDatacenter(keyword);
}
List<UnmanagedInstanceTO> instances = dcMo.getAllVmsOnDatacenter();
return StringUtils.isBlank(keyword) ? instances :
instances.stream().filter(x -> x.getName().toLowerCase().contains(keyword.toLowerCase())).collect(Collectors.toList());
return instances;
} catch (Exception e) {
String errorMsg = String.format("Error retrieving stopped VMs from the VMware VC %s datacenter %s: %s",
String errorMsg = String.format("Error retrieving VMs from the VMware VC %s datacenter %s: %s",
vcenter, datacenterName, e.getMessage());
logger.error(errorMsg, e);
throw new CloudRuntimeException(errorMsg);
}
}
private static DatacenterMO getDatacenterMO(VmwareContext context, String vcenter, String datacenterName) throws Exception {
DatacenterMO dcMo = new DatacenterMO(context, datacenterName);
ManagedObjectReference dcMor = dcMo.getMor();
if (dcMor == null) {
String msg = String.format("Unable to find VMware datacenter %s in vCenter %s", datacenterName, vcenter);
static_logger.error(msg);
throw new InvalidParameterValueException(msg);
}
return dcMo;
}
@Override
public boolean hasNexusVSM(Long clusterId) {
ClusterVSMMapVO vsmMapVo = null;
@ -1693,7 +1744,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
}
/**
* This task is to cleanup templates from primary storage that are otherwise not cleaned by the {@link com.cloud.storage.StorageManagerImpl.StorageGarbageCollector}.
This task is to clean up templates from primary storage that are otherwise not cleaned by the {@code StorageGarbageCollector} from {@link com.cloud.storage.StorageManagerImpl}.
It is called at regular intervals when storage.template.cleanup.enabled == true.
It collects all templates that
* - are deleted from cloudstack

View File

@ -2042,7 +2042,6 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
VirtualMachineDefinedProfileSpec diskProfileSpec = null;
VirtualMachineDefinedProfileSpec vmProfileSpec = null;
DeployAsIsInfoTO deployAsIsInfo = vmSpec.getDeployAsIsInfo();
boolean deployAsIs = deployAsIsInfo != null;
@ -2086,7 +2085,6 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
}
VirtualMachineDiskInfoBuilder diskInfoBuilder = null;
VirtualDevice[] nicDevices = null;
VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName);
DiskControllerType systemVmScsiControllerType = DiskControllerType.lsilogic;
int firstScsiControllerBusNum = 0;
@ -2103,7 +2101,6 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
diskDatastores = vmMo.getAllDiskDatastores();
diskInfoBuilder = vmMo.getDiskInfoBuilder();
hasSnapshot = vmMo.hasSnapshot();
nicDevices = vmMo.getNicDevices();
tearDownVmDevices(vmMo, hasSnapshot, deployAsIs);
ensureDiskControllersInternal(vmMo, systemVm, controllerInfo, systemVmScsiControllerType,
@ -2119,17 +2116,20 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
}
takeVmFromOtherHyperHost(hyperHost, vmInternalCSName);
vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName);
if (getVmPowerState(vmMo) != PowerState.PowerOff)
vmMo.safePowerOff(_shutdownWaitMs);
if (vmMo != null) {
if (getVmPowerState(vmMo) != PowerState.PowerOff)
vmMo.safePowerOff(_shutdownWaitMs);
diskInfoBuilder = vmMo.getDiskInfoBuilder();
hasSnapshot = vmMo.hasSnapshot();
diskDatastores = vmMo.getAllDiskDatastores();
diskInfoBuilder = vmMo.getDiskInfoBuilder();
hasSnapshot = vmMo.hasSnapshot();
diskDatastores = vmMo.getAllDiskDatastores();
tearDownVmDevices(vmMo, hasSnapshot, deployAsIs);
ensureDiskControllersInternal(vmMo, systemVm, controllerInfo, systemVmScsiControllerType,
numScsiControllerForSystemVm, firstScsiControllerBusNum, deployAsIs);
tearDownVmDevices(vmMo, hasSnapshot, deployAsIs);
ensureDiskControllersInternal(vmMo, systemVm, controllerInfo, systemVmScsiControllerType,
numScsiControllerForSystemVm, firstScsiControllerBusNum, deployAsIs);
}
} else {
// If a VM with the same name is found in a different cluster in the DC, unregister the old VM and configure a new VM (cold-migration).
VirtualMachineMO existingVmInDc = dcMo.findVm(vmInternalCSName);
@ -2146,7 +2146,7 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName);
if (vmMo == null) {
logger.info("Cloned deploy-as-is VM " + vmInternalCSName + " is not in this host, relocating it");
vmMo = takeVmFromOtherHyperHost(hyperHost, vmInternalCSName);
takeVmFromOtherHyperHost(hyperHost, vmInternalCSName);
}
} else {
DiskTO rootDisk = null;
@ -2256,11 +2256,11 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
vmConfigSpec.setCpuHotAddEnabled(vmMo.isCpuHotAddSupported(guestOsId) && vmSpec.isEnableDynamicallyScaleVm());
}
if(!vmMo.isMemoryHotAddSupported(guestOsId) && vmSpec.isEnableDynamicallyScaleVm()){
if (!vmMo.isMemoryHotAddSupported(guestOsId) && vmSpec.isEnableDynamicallyScaleVm()) {
logger.warn("hotadd of memory is not supported, dynamic scaling feature can not be applied to vm: " + vmInternalCSName);
}
if(!vmMo.isCpuHotAddSupported(guestOsId) && vmSpec.isEnableDynamicallyScaleVm()){
if (!vmMo.isCpuHotAddSupported(guestOsId) && vmSpec.isEnableDynamicallyScaleVm()) {
logger.warn("hotadd of cpu is not supported, dynamic scaling feature can not be applied to vm: " + vmInternalCSName);
}
@ -2593,7 +2593,7 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
Map<String, Map<String, String>> iqnToData = new HashMap<>();
postDiskConfigBeforeStart(vmMo, vmSpec, sortedDisks, ideControllerKey, scsiControllerKey, iqnToData, hyperHost, context);
postDiskConfigBeforeStart(vmMo, vmSpec, sortedDisks, iqnToData, hyperHost, context);
//
// Power-on VM
@ -2731,14 +2731,24 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
}
private boolean powerOnVM(final VirtualMachineMO vmMo, final String vmInternalCSName, final String vmNameOnVcenter) throws Exception {
int retry = 20;
while (retry-- > 0) {
final int retry = 20;
int retryAttempt = 0;
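// Retry transient vSphere file-lock/access errors with a 1s pause; any other failure is rethrown below.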
while (++retryAttempt <= retry) {
try {
logger.debug(String.format("VM %s, powerOn attempt #%d", vmInternalCSName, retryAttempt));
return vmMo.powerOn();
} catch (Exception e) {
logger.info(String.format("Got exception while power on VM %s with hostname %s", vmInternalCSName, vmNameOnVcenter), e);
if (e.getMessage() != null && e.getMessage().contains("File system specific implementation of Ioctl[file] failed")) {
if (e.getMessage() != null &&
(e.getMessage().contains("File system specific implementation of Ioctl[file] failed") ||
e.getMessage().contains("Unable to access file") ||
e.getMessage().contains("it is locked"))) {
logger.debug(String.format("Failed to power on VM %s with hostname %s. Retrying", vmInternalCSName, vmNameOnVcenter));
try {
Thread.sleep(1000);
} catch (InterruptedException ie) {
logger.debug(String.format("Waiting to power on VM %s been interrupted: ", vmInternalCSName));
}
} else {
throw e;
}
@ -3292,7 +3302,7 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
int getReservedMemoryMb(VirtualMachineTO vmSpec) {
if (vmSpec.getDetails().get(VMwareGuru.VmwareReserveMemory.key()).equalsIgnoreCase("true")) {
if(vmSpec.getDetails().get(VmDetailConstants.RAM_RESERVATION) != null){
if (vmSpec.getDetails().get(VmDetailConstants.RAM_RESERVATION) != null) {
float reservedMemory = (vmSpec.getMaxRam() * Float.parseFloat(vmSpec.getDetails().get(VmDetailConstants.RAM_RESERVATION)));
return (int) (reservedMemory / ResourceType.bytesToMiB);
}
@ -3630,18 +3640,18 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
private VirtualMachineDiskInfo getMatchingExistingDisk(VirtualMachineDiskInfoBuilder diskInfoBuilder, DiskTO vol, VmwareHypervisorHost hyperHost, VmwareContext context)
throws Exception {
if (diskInfoBuilder != null) {
VolumeObjectTO volume = (VolumeObjectTO) vol.getData();
String chainInfo = volume.getChainInfo();
Map<String, String> details = vol.getDetails();
boolean isManaged = details != null && Boolean.parseBoolean(details.get(DiskTO.MANAGED));
String iScsiName = details.get(DiskTO.IQN);
String datastoreUUID = volume.getDataStore().getUuid();
return getMatchingExistingDiskWithVolumeDetails(diskInfoBuilder, volume.getPath(), chainInfo, isManaged, iScsiName, datastoreUUID, hyperHost, context);
} else {
if (diskInfoBuilder == null) {
return null;
}
VolumeObjectTO volume = (VolumeObjectTO) vol.getData();
String chainInfo = volume.getChainInfo();
Map<String, String> details = vol.getDetails();
boolean isManaged = details != null && Boolean.parseBoolean(details.get(DiskTO.MANAGED));
String iScsiName = details.get(DiskTO.IQN);
String datastoreUUID = volume.getDataStore().getUuid();
return getMatchingExistingDiskWithVolumeDetails(diskInfoBuilder, volume.getPath(), chainInfo, isManaged, iScsiName, datastoreUUID, hyperHost, context);
}
private String getDiskController(VirtualMachineMO vmMo, VirtualMachineDiskInfo matchingExistingDisk, DiskTO vol, Pair<String, String> controllerInfo, boolean deployAsIs) throws Exception {
@ -3666,34 +3676,36 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
return VmwareHelper.getControllerBasedOnDiskType(controllerInfo, vol);
}
private void postDiskConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, DiskTO[] sortedDisks, int ideControllerKey,
int scsiControllerKey, Map<String, Map<String, String>> iqnToData, VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception {
private void postDiskConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, DiskTO[] sortedDisks,
Map<String, Map<String, String>> iqnToData, VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception {
VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder();
for (DiskTO vol : sortedDisks) {
if (vol.getType() == Volume.Type.ISO)
continue;
VolumeObjectTO volumeTO = (VolumeObjectTO) vol.getData();
VirtualMachineDiskInfo diskInfo = getMatchingExistingDisk(diskInfoBuilder, vol, hyperHost, context);
assert (diskInfo != null);
if (diskInfo == null) {
continue;
}
String[] diskChain = diskInfo.getDiskChain();
assert (diskChain.length > 0);
Map<String, String> details = vol.getDetails();
boolean managed = false;
if (details != null) {
managed = Boolean.parseBoolean(details.get(DiskTO.MANAGED));
if (diskChain.length <= 0) {
continue;
}
DatastoreFile file = new DatastoreFile(diskChain[0]);
boolean managed = false;
Map<String, String> details = vol.getDetails();
if (details != null) {
managed = Boolean.parseBoolean(details.get(DiskTO.MANAGED));
}
VolumeObjectTO volumeTO = (VolumeObjectTO) vol.getData();
if (managed) {
DatastoreFile originalFile = new DatastoreFile(volumeTO.getPath());
if (!file.getFileBaseName().equalsIgnoreCase(originalFile.getFileBaseName())) {
if (logger.isInfoEnabled())
logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumeTO.getPath() + " -> " + diskChain[0]);
@ -3706,7 +3718,6 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
}
VolumeObjectTO volInSpec = getVolumeInSpec(vmSpec, volumeTO);
if (volInSpec != null) {
if (managed) {
Map<String, String> data = new HashMap<>();
@ -3871,20 +3882,20 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
if (diskInfo != null) {
logger.info("Found existing disk info from volume path: " + volume.getPath());
return dsMo;
} else {
String chainInfo = volume.getChainInfo();
if (chainInfo != null) {
VirtualMachineDiskInfo infoInChain = _gson.fromJson(chainInfo, VirtualMachineDiskInfo.class);
if (infoInChain != null) {
String[] disks = infoInChain.getDiskChain();
if (disks.length > 0) {
for (String diskPath : disks) {
DatastoreFile file = new DatastoreFile(diskPath);
diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(file.getFileBaseName(), dsName);
if (diskInfo != null) {
logger.info("Found existing disk from chain info: " + diskPath);
return dsMo;
}
}
String chainInfo = volume.getChainInfo();
if (chainInfo != null) {
VirtualMachineDiskInfo infoInChain = _gson.fromJson(chainInfo, VirtualMachineDiskInfo.class);
if (infoInChain != null) {
String[] disks = infoInChain.getDiskChain();
if (disks.length > 0) {
for (String diskPath : disks) {
DatastoreFile file = new DatastoreFile(diskPath);
diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(file.getFileBaseName(), dsName);
if (diskInfo != null) {
logger.info("Found existing disk from chain info: " + diskPath);
return dsMo;
}
}
}
@ -4747,7 +4758,7 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
Map<Integer, Long> volumeDeviceKey = new HashMap<>();
if (cmd instanceof MigrateVolumeCommand) { // Else device keys will be found in relocateVirtualMachine
MigrateVolumeCommand mcmd = (MigrateVolumeCommand) cmd;
addVolumeDiskmapping(vmMo, volumeDeviceKey, mcmd.getVolumePath(), mcmd.getVolumeId());
addVolumeDiskMapping(vmMo, volumeDeviceKey, mcmd.getVolumePath(), mcmd.getVolumeId());
if (logger.isTraceEnabled()) {
for (Integer diskId: volumeDeviceKey.keySet()) {
logger.trace(String.format("Disk to migrate has disk id %d and volumeId %d", diskId, volumeDeviceKey.get(diskId)));
@ -4765,9 +4776,7 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
Answer createAnswerForCmd(VirtualMachineMO vmMo, List<VolumeObjectTO> volumeObjectToList, Command cmd, Map<Integer, Long> volumeDeviceKey) throws Exception {
List<VolumeObjectTO> volumeToList;
VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder();
VirtualDisk[] disks = vmMo.getAllDiskDevice();
Answer answer;
if (logger.isTraceEnabled()) {
logger.trace(String.format("creating answer for %s", cmd.getClass().getSimpleName()));
}
@ -4784,7 +4793,7 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
return new Answer(cmd, false, null);
}
private void addVolumeDiskmapping(VirtualMachineMO vmMo, Map<Integer, Long> volumeDeviceKey, String volumePath, long volumeId) throws Exception {
private void addVolumeDiskMapping(VirtualMachineMO vmMo, Map<Integer, Long> volumeDeviceKey, String volumePath, long volumeId) throws Exception {
if (logger.isDebugEnabled()) {
logger.debug(String.format("locating disk for volume (%d) using path %s", volumeId, volumePath));
}
@ -4919,7 +4928,7 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
VmwareHypervisorHost dsHost = hyperHostInTargetCluster == null ? hyperHost : hyperHostInTargetCluster;
String targetDsName = cmd.getTargetPool().getUuid();
morDestinationDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(dsHost, targetDsName);
if(morDestinationDS == null) {
if (morDestinationDS == null) {
String msg = "Unable to find the target datastore: " + targetDsName + " on host: " + dsHost.getHyperHostName();
logger.error(msg);
throw new CloudRuntimeException(msg);
@ -5886,6 +5895,11 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
logger.debug(msg);
return new Answer(cmd, true, msg);
} catch (Exception e) {
if (e.getMessage().contains("was not found")) {
String msg = String.format("%s - VM [%s] file(s) not found, cleanup not needed .", e.getMessage(), cmd.getVmName());
logger.debug(msg);
return new Answer(cmd, true, msg);
}
return new Answer(cmd, false, createLogMessageException(e, cmd));
}
}

View File

@ -20,6 +20,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import com.vmware.vim25.ManagedObjectReference;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
import org.apache.logging.log4j.Logger;
@ -193,7 +194,7 @@ public class VmwareStorageLayoutHelper implements Configurable {
if (ds.fileExists(vmdkFullCloneModeLegacyPair[i])) {
LOGGER.info("sync " + vmdkFullCloneModeLegacyPair[i] + "->" + vmdkFullCloneModePair[i]);
ds.moveDatastoreFile(vmdkFullCloneModeLegacyPair[i], dcMo.getMor(), ds.getMor(), vmdkFullCloneModePair[i], dcMo.getMor(), true);
moveDatastoreFile(ds, vmdkFullCloneModeLegacyPair[i], dcMo.getMor(), ds.getMor(), vmdkFullCloneModePair[i], dcMo.getMor(), true);
}
}
@ -201,13 +202,13 @@ public class VmwareStorageLayoutHelper implements Configurable {
if (ds.fileExists(vmdkLinkedCloneModeLegacyPair[i])) {
LOGGER.info("sync " + vmdkLinkedCloneModeLegacyPair[i] + "->" + vmdkLinkedCloneModePair[i]);
ds.moveDatastoreFile(vmdkLinkedCloneModeLegacyPair[i], dcMo.getMor(), ds.getMor(), vmdkLinkedCloneModePair[i], dcMo.getMor(), true);
moveDatastoreFile(ds, vmdkLinkedCloneModeLegacyPair[i], dcMo.getMor(), ds.getMor(), vmdkLinkedCloneModePair[i], dcMo.getMor(), true);
}
}
if (ds.fileExists(vmdkLinkedCloneModeLegacyPair[0])) {
LOGGER.info("sync " + vmdkLinkedCloneModeLegacyPair[0] + "->" + vmdkLinkedCloneModePair[0]);
ds.moveDatastoreFile(vmdkLinkedCloneModeLegacyPair[0], dcMo.getMor(), ds.getMor(), vmdkLinkedCloneModePair[0], dcMo.getMor(), true);
moveDatastoreFile(ds, vmdkLinkedCloneModeLegacyPair[0], dcMo.getMor(), ds.getMor(), vmdkLinkedCloneModePair[0], dcMo.getMor(), true);
}
// Note: we will always return a path
@ -242,14 +243,14 @@ public class VmwareStorageLayoutHelper implements Configurable {
String targetPath = getDatastorePathBaseFolderFromVmdkFileName(ds, String.format("%s-%s",vmdkName, linkedCloneExtension));
LOGGER.info("Fixup folder-synchronization. move " + companionFilePath + " -> " + targetPath);
ds.moveDatastoreFile(companionFilePath, dcMo.getMor(), ds.getMor(), targetPath, dcMo.getMor(), true);
moveDatastoreFile(ds, companionFilePath, dcMo.getMor(), ds.getMor(), targetPath, dcMo.getMor(), true);
}
}
// move the identity VMDK file last
String targetPath = getDatastorePathBaseFolderFromVmdkFileName(ds, vmdkName + ".vmdk");
LOGGER.info("Fixup folder-synchronization. move " + fileDsFullPath + " -> " + targetPath);
ds.moveDatastoreFile(fileDsFullPath, dcMo.getMor(), ds.getMor(), targetPath, dcMo.getMor(), true);
moveDatastoreFile(ds, fileDsFullPath, dcMo.getMor(), ds.getMor(), targetPath, dcMo.getMor(), true);
try {
if (folderName != null) {
@ -287,7 +288,7 @@ public class VmwareStorageLayoutHelper implements Configurable {
DatastoreFile targetFile = new DatastoreFile(file.getDatastoreName(), HypervisorHostHelper.VSPHERE_DATASTORE_BASE_FOLDER, file.getFileName());
if (!targetFile.getPath().equalsIgnoreCase(file.getPath())) {
LOGGER.info("Move " + file.getPath() + " -> " + targetFile.getPath());
dsMo.moveDatastoreFile(file.getPath(), dcMo.getMor(), dsMo.getMor(), targetFile.getPath(), dcMo.getMor(), true);
moveDatastoreFile(dsMo, file.getPath(), dcMo.getMor(), dsMo.getMor(), targetFile.getPath(), dcMo.getMor(), true);
List<String> vSphereFileExtensions = new ArrayList<>(Arrays.asList(VsphereLinkedCloneExtensions.value().trim().split("\\s*,\\s*")));
// add flat file format to the above list
@ -297,7 +298,7 @@ public class VmwareStorageLayoutHelper implements Configurable {
String pairTargetFilePath = targetFile.getCompanionPath(String.format("%s-%s", file.getFileBaseName(), linkedCloneExtension));
if (dsMo.fileExists(pairSrcFilePath)) {
LOGGER.info("Move " + pairSrcFilePath + " -> " + pairTargetFilePath);
dsMo.moveDatastoreFile(pairSrcFilePath, dcMo.getMor(), dsMo.getMor(), pairTargetFilePath, dcMo.getMor(), true);
moveDatastoreFile(dsMo, pairSrcFilePath, dcMo.getMor(), dsMo.getMor(), pairTargetFilePath, dcMo.getMor(), true);
}
}
}
@ -429,6 +430,31 @@ public class VmwareStorageLayoutHelper implements Configurable {
return dsMo.searchFileInSubFolders(volumePath + ".vmdk", false, null);
}
public static boolean moveDatastoreFile(final DatastoreMO dsMo, String srcFilePath, ManagedObjectReference morSrcDc, ManagedObjectReference morDestDs,
String destFilePath, ManagedObjectReference morDestDc, boolean forceOverwrite) throws Exception {
final int retry = 20;
int retryAttempt = 0;
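// Retry up to 20 times on transient "Unable to access file" errors from vSphere; other exceptions are rethrown.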
while (++retryAttempt <= retry) {
try {
LOGGER.debug(String.format("Move datastore file %s, attempt #%d", srcFilePath, retryAttempt));
return dsMo.moveDatastoreFile(srcFilePath, morSrcDc, morDestDs, destFilePath, morDestDc, forceOverwrite);
} catch (Exception e) {
LOGGER.info(String.format("Got exception while moving datastore file %s ", srcFilePath), e);
if (e.getMessage() != null && e.getMessage().contains("Unable to access file")) {
LOGGER.debug(String.format("Failed to move datastore file %s. Retrying", srcFilePath));
try {
Thread.sleep(1000);
} catch (InterruptedException ie) {
LOGGER.debug(String.format("Waiting to move datastore file %s been interrupted: ", srcFilePath));
}
} else {
throw e;
}
}
}
return false;
}
@Override
public String getConfigComponentName() {
return VmwareStorageLayoutHelper.class.getSimpleName();

View File

@ -682,9 +682,9 @@ public class VmwareStorageProcessor implements StorageProcessor {
String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairManagedDatastorePath(dsMo, null,
managedStoragePoolRootVolumeName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, false);
dsMo.moveDatastoreFile(vmwareLayoutFilePair[0], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[0], dcMo.getMor(), true);
VmwareStorageLayoutHelper.moveDatastoreFile(dsMo, vmwareLayoutFilePair[0], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[0], dcMo.getMor(), true);
for (int i=1; i<vmwareLayoutFilePair.length; i++) {
dsMo.moveDatastoreFile(vmwareLayoutFilePair[i], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[i], dcMo.getMor(), true);
VmwareStorageLayoutHelper.moveDatastoreFile(dsMo, vmwareLayoutFilePair[i], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[i], dcMo.getMor(), true);
}
String folderToDelete = dsMo.getDatastorePath(managedStoragePoolRootVolumeName, true);
@ -814,7 +814,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
existingVm.detachAllDisksAndDestroy();
}
logger.info("ROOT Volume from deploy-as-is template, cloning template");
cloneVMFromTemplate(hyperHost, template.getPath(), vmName, primaryStore.getUuid());
cloneVMFromTemplate(hyperHost, template, volume, vmName, primaryStore.getUuid());
} else {
logger.info("ROOT Volume from deploy-as-is template, volume already created at this point");
}
@ -945,7 +945,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, vmdkName, vmdkFileBaseName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, !_fullCloneFlag);
for (int i = 0; i < vmwareLayoutFilePair.length; i++) {
dsMo.moveDatastoreFile(vmwareLayoutFilePair[i], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[i], dcMo.getMor(), true);
VmwareStorageLayoutHelper.moveDatastoreFile(dsMo, vmwareLayoutFilePair[i], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[i], dcMo.getMor(), true);
}
logger.info("detach disks from volume-wrapper VM and destroy {}", vmdkName);
@ -1222,10 +1222,10 @@ public class VmwareStorageProcessor implements StorageProcessor {
// Get VMDK filename
String templateVMDKName = "";
File[] files = new File(installFullPath).listFiles();
if(files != null) {
if (files != null) {
for(File file : files) {
String fileName = file.getName();
if(fileName.toLowerCase().startsWith(templateUniqueName) && fileName.toLowerCase().endsWith(".vmdk")) {
if (fileName.toLowerCase().startsWith(templateUniqueName) && fileName.toLowerCase().endsWith(".vmdk")) {
templateVMDKName += fileName;
break;
}
@ -1856,16 +1856,16 @@ public class VmwareStorageProcessor implements StorageProcessor {
CopyCmdAnswer answer = null;
try {
if(vmName != null) {
if (vmName != null) {
vmMo = hyperHost.findVmOnHyperHost(vmName);
if (vmMo == null) {
if(logger.isDebugEnabled()) {
if (logger.isDebugEnabled()) {
logger.debug("Unable to find owner VM for BackupSnapshotCommand on host " + hyperHost.getHyperHostName() + ", will try within datacenter");
}
vmMo = hyperHost.findVmOnPeerHyperHost(vmName);
}
}
if(vmMo == null) {
if (vmMo == null) {
dsMo = new DatastoreMO(hyperHost.getContext(), morDs);
workerVMName = hostService.getWorkerName(context, cmd, 0, dsMo);
vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, workerVMName, null);
@ -1899,10 +1899,10 @@ public class VmwareStorageProcessor implements StorageProcessor {
String secondaryMountPoint = mountService.getMountPoint(secondaryStorageUrl, _nfsVersion);
String snapshotDir = destSnapshot.getPath() + "/" + snapshotBackupUuid;
File[] files = new File(secondaryMountPoint + "/" + snapshotDir).listFiles();
if(files != null) {
if (files != null) {
for(File file : files) {
String fileName = file.getName();
if(fileName.toLowerCase().startsWith(snapshotBackupUuid) && fileName.toLowerCase().endsWith(".vmdk")) {
if (fileName.toLowerCase().startsWith(snapshotBackupUuid) && fileName.toLowerCase().endsWith(".vmdk")) {
physicalSize = new File(secondaryMountPoint + "/" + snapshotDir + "/" + fileName).length();
break;
}
@ -3651,7 +3651,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
}
workerVm.tagAsWorkerVM();
if(!primaryDsMo.getDatastoreType().equalsIgnoreCase("VVOL")) {
if (!primaryDsMo.getDatastoreType().equalsIgnoreCase("VVOL")) {
HypervisorHostHelper.createBaseFolderInDatastore(primaryDsMo, primaryDsMo.getDataCenterMor());
workerVm.moveAllVmDiskFiles(primaryDsMo, HypervisorHostHelper.VSPHERE_DATASTORE_BASE_FOLDER, false);
}
@ -3811,8 +3811,9 @@ public class VmwareStorageProcessor implements StorageProcessor {
/**
* Return the cloned VM from the template
*/
public VirtualMachineMO cloneVMFromTemplate(VmwareHypervisorHost hyperHost, String templateName, String cloneName, String templatePrimaryStoreUuid) {
public VirtualMachineMO cloneVMFromTemplate(VmwareHypervisorHost hyperHost, TemplateObjectTO template, VolumeObjectTO volume, String cloneName, String templatePrimaryStoreUuid) {
try {
String templateName = template.getPath();
VmwareContext context = hyperHost.getContext();
DatacenterMO dcMo = new DatacenterMO(context, hyperHost.getHyperHostDatacenter());
VirtualMachineMO templateMo = dcMo.findVm(templateName);
@ -3826,6 +3827,9 @@ public class VmwareStorageProcessor implements StorageProcessor {
throw new CloudRuntimeException("Unable to find datastore in vSphere");
}
logger.info("Cloning VM " + cloneName + " from template " + templateName + " into datastore " + templatePrimaryStoreUuid);
if (template.getSize() != null) {
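// If the requested volume is larger than the template, force a full clone; a linked clone inherits the template's base disk.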
_fullCloneFlag = volume.getSize() > template.getSize() ? true : _fullCloneFlag;
}
if (!_fullCloneFlag) {
createVMLinkedClone(templateMo, dcMo, cloneName, morDatastore, morPool, null);
} else {

View File

@ -70,6 +70,12 @@ public class ListVmwareDcVmsCmd extends BaseListCmd {
@Parameter(name = ApiConstants.PASSWORD, type = CommandType.STRING, description = "The password for specified username.")
private String password;
@Parameter(name = ApiConstants.HOST_NAME, type = CommandType.STRING, description = "Name of the host on vCenter. Must be set along with the instancename parameter")
private String hostName;
@Parameter(name = ApiConstants.INSTANCE_NAME, type = CommandType.STRING, description = "Name of the VM on vCenter. Must be set along with the hostname parameter")
private String instanceName;
public String getVcenter() {
return vcenter;
}
@ -86,10 +92,18 @@ public class ListVmwareDcVmsCmd extends BaseListCmd {
return datacenterName;
}
public String getHostName() {
return hostName;
}
public Long getExistingVcenterId() {
return existingVcenterId;
}
public String getInstanceName() {
return instanceName;
}
@Override
public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException {
checkParameters();
@ -125,6 +139,11 @@ public class ListVmwareDcVmsCmd extends BaseListCmd {
throw new ServerApiException(ApiErrorCode.PARAM_ERROR,
"Please set all the information for a vCenter IP/Name, datacenter, username and password");
}
if ((StringUtils.isNotBlank(instanceName) && StringUtils.isBlank(hostName)) ||
(StringUtils.isBlank(instanceName) && StringUtils.isNotBlank(hostName))) {
throw new ServerApiException(ApiErrorCode.PARAM_ERROR,
"Please set the hostname parameter along with the instancename parameter");
}
}
@Override

View File

@ -373,7 +373,7 @@ public class ManagementServerMock {
ConfigurationManager mgr = (ConfigurationManager)_configService;
_zone =
mgr.createZone(User.UID_SYSTEM, "default", "8.8.8.8", null, "8.8.4.4", null, null /* cidr */, "ROOT", Domain.ROOT_DOMAIN, NetworkType.Advanced, null,
null /* networkDomain */, false, false, null, null, false);
null /* networkDomain */, false, false, null, null, false, null);
}
}

View File

@ -66,7 +66,7 @@ public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator {
StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
if (filter(avoid, pol, dskCh, plan)) {
logger.trace(String.format("Found suitable local storage pool [%s], adding to list.", pool));
logger.trace(String.format("Found suitable storage pool [%s], adding to list.", pool));
suitablePools.add(pol);
}
}

View File

@ -26,6 +26,8 @@ import java.util.StringTokenizer;
import javax.inject.Inject;
import com.cloud.host.Host;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
@ -50,7 +52,6 @@ import com.cloud.agent.api.DeleteStoragePoolCommand;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.capacity.CapacityManager;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
@ -90,6 +91,8 @@ public class ElastistorPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLif
DataCenterDao _zoneDao;
@Inject
CapacityManager _capacityMgr;
@Inject
private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao;
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
@ -356,17 +359,13 @@ public class ElastistorPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLif
StoragePoolVO dataStoreVO = _storagePoolDao.findById(store.getId());
PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store;
// Check if there is host up in this cluster
List<HostVO> allHosts = _resourceMgr.listAllUpHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId());
if (allHosts.isEmpty()) {
primaryDataStoreDao.expunge(primarystore.getId());
throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primarystore.getClusterId());
}
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpHostsInClusterForStorageConnection(primarystore);
logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primarystore.getClusterId()));
if (!dataStoreVO.isManaged()) {
boolean success = false;
for (HostVO host : allHosts) {
success = createStoragePool(host, primarystore);
for (HostVO h : hostsToConnect) {
success = createStoragePool(h, primarystore);
if (success) {
break;
}
@ -375,7 +374,7 @@ public class ElastistorPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLif
logger.debug("In createPool Adding the pool to each of the hosts");
List<HostVO> poolHosts = new ArrayList<HostVO>();
for (HostVO h : allHosts) {
for (HostVO h : hostsToConnect) {
try {
storageMgr.connectHostToSharedPool(h, primarystore.getId());
poolHosts.add(h);
@ -428,10 +427,11 @@ public class ElastistorPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLif
@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
List<HostVO> hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId());
logger.debug("In createPool. Attaching the pool to each of the hosts.");
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), hypervisorType);
logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect));
List<HostVO> poolHosts = new ArrayList<HostVO>();
for (HostVO host : hosts) {
for (HostVO host : hostsToConnect) {
try {
storageMgr.connectHostToSharedPool(host, dataStore.getId());
poolHosts.add(host);

View File

@ -25,7 +25,6 @@ import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
@ -38,8 +37,10 @@ import com.cloud.storage.StoragePoolAutomation;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.SnapshotDetailsDao;
import com.cloud.storage.dao.SnapshotDetailsVO;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
@ -84,6 +85,8 @@ public class DateraPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCyc
private StoragePoolHostDao _storagePoolHostDao;
@Inject
private StoragePoolAutomation storagePoolAutomation;
@Inject
private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao;
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
@ -97,6 +100,7 @@ public class DateraPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCyc
Long capacityBytes = (Long) dsInfos.get("capacityBytes");
Long capacityIops = (Long) dsInfos.get("capacityIops");
String tags = (String) dsInfos.get("tags");
String storageAccessGroups = (String)dsInfos.get(ApiConstants.STORAGE_ACCESS_GROUPS);
boolean isTagARule = (Boolean)dsInfos.get("isTagARule");
@SuppressWarnings("unchecked")
Map<String, String> details = (Map<String, String>) dsInfos.get("details");
@ -179,6 +183,7 @@ public class DateraPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCyc
parameters.setCapacityIops(capacityIops);
parameters.setHypervisorType(HypervisorType.Any);
parameters.setTags(tags);
parameters.setStorageAccessGroups(storageAccessGroups);
parameters.setIsTagARule(isTagARule);
parameters.setDetails(details);
@ -243,22 +248,13 @@ public class DateraPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCyc
@Override
public boolean attachCluster(DataStore datastore, ClusterScope scope) {
PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) datastore;
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpHostsInClusterForStorageConnection(primaryDataStoreInfo);
// check if there is at least one host up in this cluster
List<HostVO> allHosts = _resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing,
primaryDataStoreInfo.getClusterId(), primaryDataStoreInfo.getPodId(),
primaryDataStoreInfo.getDataCenterId());
if (allHosts.isEmpty()) {
storagePoolDao.expunge(primaryDataStoreInfo.getId());
throw new CloudRuntimeException(
"No host up to associate a storage pool with in cluster " + primaryDataStoreInfo.getClusterId());
}
logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primaryDataStoreInfo.getClusterId()));
List<HostVO> poolHosts = new ArrayList<HostVO>();
for (HostVO host : allHosts) {
for (HostVO host : hostsToConnect) {
try {
_storageMgr.connectHostToSharedPool(host, primaryDataStoreInfo.getId());
@ -288,19 +284,15 @@ public class DateraPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCyc
public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
dataStoreHelper.attachZone(dataStore);
List<HostVO> xenServerHosts = _resourceMgr
.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.XenServer, scope.getScopeId());
List<HostVO> vmWareServerHosts = _resourceMgr
.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.VMware, scope.getScopeId());
List<HostVO> kvmHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM,
scope.getScopeId());
List<HostVO> hosts = new ArrayList<HostVO>();
List<HostVO> hostsToConnect = new ArrayList<>();
HypervisorType[] hypervisorTypes = {HypervisorType.XenServer, HypervisorType.VMware, HypervisorType.KVM};
hosts.addAll(xenServerHosts);
hosts.addAll(vmWareServerHosts);
hosts.addAll(kvmHosts);
for (HypervisorType type : hypervisorTypes) {
hostsToConnect.addAll(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), type));
}
for (HostVO host : hosts) {
logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect));
for (HostVO host : hostsToConnect) {
try {
_storageMgr.connectHostToSharedPool(host, dataStore.getId());
} catch (Exception e) {

View File

@ -453,9 +453,18 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri
boolean encryptionRequired = anyVolumeRequiresEncryption(vol);
long [] endpointsToRunResize = resizeParameter.hosts;
CreateCmdResult result = new CreateCmdResult(null, null);
// if hosts are provided, they are where the VM last ran. We can use that.
if (endpointsToRunResize == null || endpointsToRunResize.length == 0) {
EndPoint ep = epSelector.select(data, encryptionRequired);
if (ep == null) {
String errMsg = String.format(NO_REMOTE_ENDPOINT_WITH_ENCRYPTION, encryptionRequired);
logger.error(errMsg);
result.setResult(errMsg);
callback.complete(result);
return;
}
endpointsToRunResize = new long[] {ep.getId()};
}
ResizeVolumeCommand resizeCmd = new ResizeVolumeCommand(vol.getPath(), new StorageFilerTO(pool), vol.getSize(),
@ -463,7 +472,6 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri
if (pool.getParent() != 0) {
resizeCmd.setContextParam(DiskTO.PROTOCOL_TYPE, Storage.StoragePoolType.DatastoreCluster.toString());
}
CreateCmdResult result = new CreateCmdResult(null, null);
try {
ResizeVolumeAnswer answer = (ResizeVolumeAnswer) storageMgr.sendToPool(pool, endpointsToRunResize, resizeCmd);
if (answer != null && answer.getResult()) {
@ -480,7 +488,6 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri
logger.debug("return a null answer, mark it as failed for unknown reason");
result.setResult("return a null answer, mark it as failed for unknown reason");
}
} catch (Exception e) {
logger.debug("sending resize command failed", e);
result.setResult(e.toString());

View File

@ -18,7 +18,6 @@
*/
package org.apache.cloudstack.storage.datastore.lifecycle;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.UUID;
@ -26,6 +25,7 @@ import java.util.stream.Collectors;
import javax.inject.Inject;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
@ -51,7 +51,6 @@ import com.cloud.dc.dao.HostPodDao;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.StorageConflictException;
import com.cloud.exception.StorageUnavailableException;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
@ -63,6 +62,7 @@ import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolAutomation;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.StoragePoolWorkDao;
import com.cloud.storage.dao.VolumeDao;
@ -129,6 +129,8 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor
StoragePoolAutomation storagePoolAutmation;
@Inject
protected HostDao _hostDao;
@Inject
private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao;
@SuppressWarnings("unchecked")
@Override
@ -146,9 +148,11 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor
PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
String tags = (String)dsInfos.get("tags");
String storageAccessGroups = (String)dsInfos.get(ApiConstants.STORAGE_ACCESS_GROUPS);
Map<String, String> details = (Map<String, String>)dsInfos.get("details");
parameters.setTags(tags);
parameters.setStorageAccessGroups(storageAccessGroups);
parameters.setIsTagARule((Boolean)dsInfos.get("isTagARule"));
parameters.setDetails(details);
@ -386,17 +390,15 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor
}
private Pair<List<Long>, Boolean> prepareOcfs2NodesIfNeeded(PrimaryDataStoreInfo primaryStore) {
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpHostsInClusterForStorageConnection(primaryStore);
logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primaryStore.getClusterId()));
List<Long> hostIds = hostsToConnect.stream().map(HostVO::getId).collect(Collectors.toList());
if (!StoragePoolType.OCFS2.equals(primaryStore.getPoolType())) {
return new Pair<>(_hostDao.listIdsForUpRouting(primaryStore.getDataCenterId(),
primaryStore.getPodId(), primaryStore.getClusterId()), true);
return new Pair<>(hostIds, true);
}
List<HostVO> allHosts = _resourceMgr.listAllUpHosts(Host.Type.Routing, primaryStore.getClusterId(),
primaryStore.getPodId(), primaryStore.getDataCenterId());
if (allHosts.isEmpty()) {
return new Pair<>(Collections.emptyList(), true);
}
List<Long> hostIds = allHosts.stream().map(HostVO::getId).collect(Collectors.toList());
if (!_ocfs2Mgr.prepareNodes(allHosts, primaryStore)) {
if (!_ocfs2Mgr.prepareNodes(hostsToConnect, primaryStore)) {
return new Pair<>(hostIds, false);
}
return new Pair<>(hostIds, true);
@ -432,8 +434,9 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor
@Override
public boolean attachZone(DataStore store, ZoneScope scope, HypervisorType hypervisorType) {
List<Long> hostIds = _hostDao.listIdsForUpEnabledByZoneAndHypervisor(scope.getScopeId(), hypervisorType);
logger.debug("In createPool. Attaching the pool to each of the hosts.");
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(store, scope.getScopeId(), hypervisorType);
logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect));
List<Long> hostIds = hostsToConnect.stream().map(HostVO::getId).collect(Collectors.toList());
storageMgr.connectHostsToPool(store, hostIds, scope, true, true);
dataStoreHelper.attachZone(store, hypervisorType);
return true;

View File

@ -25,7 +25,7 @@ import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.List;
import java.util.Arrays;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
@ -143,9 +143,15 @@ public class CloudStackPrimaryDataStoreLifeCycleImplTest extends TestCase {
storageMgr.registerHostListener("default", hostListener);
HostVO host1 = Mockito.mock(HostVO.class);
HostVO host2 = Mockito.mock(HostVO.class);
Mockito.when(host1.getId()).thenReturn(1L);
Mockito.when(host2.getId()).thenReturn(2L);
when(_resourceMgr.getEligibleUpHostsInClusterForStorageConnection(store))
.thenReturn(Arrays.asList(host1, host2));
when(hostDao.listIdsForUpRouting(anyLong(), anyLong(), anyLong()))
.thenReturn(List.of(1L, 2L));
when(hostDao.findById(anyLong())).thenReturn(mock(HostVO.class));
when(agentMgr.easySend(anyLong(), Mockito.any(ModifyStoragePoolCommand.class))).thenReturn(answer);
when(answer.getResult()).thenReturn(true);

View File

@ -39,6 +39,7 @@ import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolAutomation;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
@ -68,6 +69,8 @@ public class LinstorPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStoreLi
@Inject
private CapacityManager _capacityMgr;
@Inject
private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao;
@Inject
AgentManager _agentMgr;
public LinstorPrimaryDataStoreLifeCycleImpl()
@ -204,20 +207,12 @@ public class LinstorPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStoreLi
throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type.");
}
// check if there is at least one host up in this cluster
List<HostVO> allHosts = resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing,
primaryDataStoreInfo.getClusterId(), primaryDataStoreInfo.getPodId(),
primaryDataStoreInfo.getDataCenterId());
if (allHosts.isEmpty()) {
_primaryDataStoreDao.expunge(primaryDataStoreInfo.getId());
throw new CloudRuntimeException(
"No host up to associate a storage pool with in cluster " + primaryDataStoreInfo.getClusterId());
}
PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) dataStore;
List<HostVO> hostsToConnect = resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primarystore);
logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primarystore.getClusterId()));
List<HostVO> poolHosts = new ArrayList<>();
for (HostVO host : allHosts) {
for (HostVO host : hostsToConnect) {
try {
createStoragePool(host, primaryDataStoreInfo);
@ -249,10 +244,11 @@ public class LinstorPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStoreLi
throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type.");
}
List<HostVO> hosts = resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType,
scope.getScopeId());
List<HostVO> hostsToConnect = resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), hypervisorType);
for (HostVO host : hosts) {
logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect));
for (HostVO host : hostsToConnect) {
try {
_storageMgr.connectHostToSharedPool(host, dataStore.getId());
} catch (Exception e) {

View File

@ -18,7 +18,6 @@ package org.apache.cloudstack.storage.datastore.provider;
import com.cloud.exception.StorageConflictException;
import com.cloud.host.HostVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
public class LinstorHostListener extends DefaultHostListener {
@Override
@ -28,7 +27,6 @@ public class LinstorHostListener extends DefaultHostListener {
host.setParent(host.getName());
hostDao.update(host.getId(), host);
}
StoragePoolVO pool = primaryStoreDao.findById(poolId);
return super.hostConnect(host, pool);
return super.hostConnect(hostId, poolId);
}
}

View File

@ -24,6 +24,7 @@ import java.util.Map;
import javax.inject.Inject;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
@ -58,6 +59,8 @@ public class NexentaPrimaryDataStoreLifeCycle
StorageManager _storageMgr;
@Inject
private StoragePoolAutomation storagePoolAutomation;
@Inject
private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao;
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
@ -130,16 +133,14 @@ public class NexentaPrimaryDataStoreLifeCycle
public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.HypervisorType hypervisorType) {
dataStoreHelper.attachZone(dataStore);
List<HostVO> xenServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(Hypervisor.HypervisorType.XenServer, scope.getScopeId());
List<HostVO> vmWareServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(Hypervisor.HypervisorType.VMware, scope.getScopeId());
List<HostVO> kvmHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(Hypervisor.HypervisorType.KVM, scope.getScopeId());
List<HostVO> hosts = new ArrayList<HostVO>();
List<HostVO> hostsToConnect = new ArrayList<>();
Hypervisor.HypervisorType[] hypervisorTypes = {Hypervisor.HypervisorType.XenServer, Hypervisor.HypervisorType.VMware, Hypervisor.HypervisorType.KVM};
hosts.addAll(xenServerHosts);
hosts.addAll(vmWareServerHosts);
hosts.addAll(kvmHosts);
for (HostVO host : hosts) {
for (Hypervisor.HypervisorType type : hypervisorTypes) {
hostsToConnect.addAll(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), type));
}
logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect));
for (HostVO host : hostsToConnect) {
try {
_storageMgr.connectHostToSharedPool(host, dataStore.getId());
} catch (Exception e) {

View File

@ -18,6 +18,39 @@
*/
package org.apache.cloudstack.storage.datastore.lifecycle;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URLDecoder;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.stream.Collectors;
import javax.inject.Inject;
import com.cloud.host.HostVO;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import org.apache.cloudstack.api.ApiConstants;
import com.cloud.utils.StringUtils;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics;
import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient;
import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil;
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
import org.apache.commons.collections.CollectionUtils;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.capacity.CapacityManager;
@ -34,41 +67,15 @@ import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolAutomation;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.template.TemplateManager;
import com.cloud.utils.StringUtils;
import com.cloud.utils.UriUtils;
import com.cloud.utils.component.ComponentContext;
import com.cloud.utils.crypt.DBEncryptionUtil;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics;
import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient;
import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.manager.ScaleIOSDCManager;
import org.apache.cloudstack.storage.datastore.manager.ScaleIOSDCManagerImpl;
import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil;
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
import org.apache.commons.collections.CollectionUtils;
import javax.inject.Inject;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URLDecoder;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
@Inject
@ -98,6 +105,8 @@ public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCy
@Inject
private AgentManager agentMgr;
private ScaleIOSDCManager sdcManager;
@Inject
private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao;
public ScaleIOPrimaryDataStoreLifeCycle() {
sdcManager = new ScaleIOSDCManagerImpl();
@ -141,6 +150,7 @@ public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCy
Long capacityBytes = (Long)dsInfos.get("capacityBytes");
Long capacityIops = (Long)dsInfos.get("capacityIops");
String tags = (String)dsInfos.get("tags");
String storageAccessGroups = (String)dsInfos.get(ApiConstants.STORAGE_ACCESS_GROUPS);
Boolean isTagARule = (Boolean) dsInfos.get("isTagARule");
Map<String, String> details = (Map<String, String>) dsInfos.get("details");
@ -223,6 +233,7 @@ public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCy
parameters.setHypervisorType(Hypervisor.HypervisorType.KVM);
parameters.setUuid(UUID.randomUUID().toString());
parameters.setTags(tags);
parameters.setStorageAccessGroups(storageAccessGroups);
parameters.setIsTagARule(isTagARule);
StoragePoolStatistics poolStatistics = scaleIOPool.getStatistics();
@ -260,14 +271,10 @@ public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCy
}
PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) dataStore;
List<Long> hostIds = hostDao.listIdsForUpRouting(primaryDataStoreInfo.getDataCenterId(),
primaryDataStoreInfo.getPodId(), primaryDataStoreInfo.getClusterId());
if (hostIds.isEmpty()) {
primaryDataStoreDao.expunge(primaryDataStoreInfo.getId());
throw new CloudRuntimeException("No hosts are Up to associate a storage pool with in cluster: " + cluster);
}
List<HostVO> hostsToConnect = resourceManager.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryDataStoreInfo);
logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, cluster));
List<Long> hostIds = hostsToConnect.stream().map(HostVO::getId).collect(Collectors.toList());
logger.debug("Attaching the pool to each of the hosts in the {}", cluster);
storageMgr.connectHostsToPool(dataStore, hostIds, scope, false, false);
dataStoreHelper.attachCluster(dataStore);
@ -287,7 +294,10 @@ public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCy
logger.debug("Attaching the pool to each of the hosts in the {}",
dataCenterDao.findById(scope.getScopeId()));
List<Long> hostIds = hostDao.listIdsForUpEnabledByZoneAndHypervisor(scope.getScopeId(), hypervisorType);
List<HostVO> hostsToConnect = resourceManager.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), hypervisorType);
logger.debug(String.format("Attaching the pool to each of the hosts %s in the zone: %s", hostsToConnect, scope.getScopeId()));
List<Long> hostIds = hostsToConnect.stream().map(HostVO::getId).collect(Collectors.toList());
storageMgr.connectHostsToPool(dataStore, hostIds, scope, false, false);
dataStoreHelper.attachZone(dataStore);

View File

@ -30,8 +30,11 @@ import static org.mockito.Mockito.mockStatic;
import static org.mockito.Mockito.when;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import com.cloud.host.HostVO;
import com.cloud.resource.ResourceManager;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
@ -106,6 +109,9 @@ public class ScaleIOPrimaryDataStoreLifeCycleTest {
@Mock
private HypervisorHostListener hostListener;
@Mock
private ResourceManager resourceManager;
@InjectMocks
private ScaleIOPrimaryDataStoreLifeCycle scaleIOPrimaryDataStoreLifeCycleTest;
private AutoCloseable closeable;
@ -137,8 +143,14 @@ public class ScaleIOPrimaryDataStoreLifeCycleTest {
final ZoneScope scope = new ZoneScope(1L);
when(hostDao.listIdsForUpEnabledByZoneAndHypervisor(scope.getScopeId(), Hypervisor.HypervisorType.KVM))
.thenReturn(List.of(1L, 2L));
HostVO host1 = Mockito.mock(HostVO.class);
HostVO host2 = Mockito.mock(HostVO.class);
Mockito.when(host1.getId()).thenReturn(1L);
Mockito.when(host2.getId()).thenReturn(2L);
when(resourceManager.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), Hypervisor.HypervisorType.KVM))
.thenReturn(Arrays.asList(host1, host2));
when(dataStoreMgr.getDataStore(anyLong(), eq(DataStoreRole.Primary))).thenReturn(store);
when(store.isShared()).thenReturn(true);

View File

@ -25,6 +25,7 @@ import java.util.UUID;
import javax.inject.Inject;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
@ -43,7 +44,6 @@ import com.cloud.capacity.CapacityManager;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.resource.ResourceManager;
@ -74,6 +74,8 @@ public class SolidFirePrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLife
@Inject private StoragePoolAutomation _storagePoolAutomation;
@Inject private StoragePoolDetailsDao _storagePoolDetailsDao;
@Inject private VMTemplatePoolDao _tmpltPoolDao;
@Inject
private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao;
// invoked to add primary storage that is based on the SolidFire plug-in
@Override
@ -235,11 +237,10 @@ public class SolidFirePrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLife
@Override
public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo)dataStore;
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primarystore);
List<HostVO> hosts =
_resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId());
for (HostVO host : hosts) {
logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primarystore.getClusterId()));
for (HostVO host : hostsToConnect) {
try {
_storageMgr.connectHostToSharedPool(host, dataStore.getId());
} catch (Exception e) {
@ -254,16 +255,15 @@ public class SolidFirePrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLife
@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
List<HostVO> xenServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.XenServer, scope.getScopeId());
List<HostVO> vmWareServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.VMware, scope.getScopeId());
List<HostVO> kvmHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId());
List<HostVO> hosts = new ArrayList<>();
List<HostVO> hostsToConnect = new ArrayList<>();
HypervisorType[] hypervisorTypes = {HypervisorType.XenServer, HypervisorType.VMware, HypervisorType.KVM};
hosts.addAll(xenServerHosts);
hosts.addAll(vmWareServerHosts);
hosts.addAll(kvmHosts);
for (HypervisorType type : hypervisorTypes) {
hostsToConnect.addAll(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), type));
}
for (HostVO host : hosts) {
logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect));
for (HostVO host : hostsToConnect) {
try {
_storageMgr.connectHostToSharedPool(host, dataStore.getId());
} catch (Exception e) {

View File

@ -26,6 +26,8 @@ import java.util.Map;
import javax.inject.Inject;
import com.cloud.host.Host;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
@ -50,7 +52,6 @@ import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
@ -85,6 +86,8 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle extends BasePrimaryDataSto
@Inject private StoragePoolDetailsDao storagePoolDetailsDao;
@Inject private StoragePoolHostDao storagePoolHostDao;
@Inject private TemplateManager tmpltMgr;
@Inject
private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao;
// invoked to add primary storage that is based on the SolidFire plug-in
@Override
@ -382,19 +385,12 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle extends BasePrimaryDataSto
public boolean attachCluster(DataStore store, ClusterScope scope) {
PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo)store;
// check if there is at least one host up in this cluster
List<HostVO> allHosts = resourceMgr.listAllUpHosts(Host.Type.Routing, primaryDataStoreInfo.getClusterId(),
primaryDataStoreInfo.getPodId(), primaryDataStoreInfo.getDataCenterId());
if (allHosts.isEmpty()) {
primaryDataStoreDao.expunge(primaryDataStoreInfo.getId());
throw new CloudRuntimeException(String.format("No host up to associate a storage pool with in cluster %s", clusterDao.findById(primaryDataStoreInfo.getClusterId())));
}
List<HostVO> hostsToConnect = resourceMgr.getEligibleUpHostsInClusterForStorageConnection(primaryDataStoreInfo);
boolean success = false;
logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, clusterDao.findById(primaryDataStoreInfo.getClusterId())));
for (HostVO host : allHosts) {
for (HostVO host : hostsToConnect) {
success = createStoragePool(host, primaryDataStoreInfo);
if (success) {
@ -408,7 +404,7 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle extends BasePrimaryDataSto
List<HostVO> poolHosts = new ArrayList<>();
for (HostVO host : allHosts) {
for (HostVO host : hostsToConnect) {
try {
storageMgr.connectHostToSharedPool(host, primaryDataStoreInfo.getId());

View File

@ -24,6 +24,7 @@ import java.util.UUID;
import javax.inject.Inject;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
@ -80,6 +81,8 @@ public class StorPoolPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeC
private VMTemplateDetailsDao vmTemplateDetailsDao;
@Inject
private StoragePoolDetailsDao storagePoolDetailsDao;
@Inject
private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao;
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
@ -208,8 +211,11 @@ public class StorPoolPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeC
if (hypervisorType != HypervisorType.KVM) {
throw new UnsupportedOperationException("Only KVM hypervisors supported!");
}
List<HostVO> kvmHosts = resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId());
for (HostVO host : kvmHosts) {
List<HostVO> kvmHostsToConnect = resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), HypervisorType.KVM);
logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", kvmHostsToConnect));
for (HostVO host : kvmHostsToConnect) {
try {
storageMgr.connectHostToSharedPool(host, dataStore.getId());
} catch (Exception e) {

View File

@ -2007,6 +2007,10 @@ public class ApiDBUtils {
return s_projectInvitationJoinDao.newProjectInvitationView(proj);
}
public static HostResponse newMinimalHostResponse(HostJoinVO vr) {
return s_hostJoinDao.newMinimalHostResponse(vr);
}
public static HostResponse newHostResponse(HostJoinVO vr, EnumSet<HostDetails> details) {
return s_hostJoinDao.newHostResponse(vr, details);
}
@ -2035,6 +2039,10 @@ public class ApiDBUtils {
return s_poolJoinDao.newStoragePoolResponse(vr, customStats);
}
public static StoragePoolResponse newMinimalStoragePoolResponse(StoragePoolJoinVO vr) {
return s_poolJoinDao.newMinimalStoragePoolResponse(vr);
}
public static StorageTagResponse newStorageTagResponse(StoragePoolTagVO vr) {
return s_tagDao.newStorageTagResponse(vr);
}
@ -2164,6 +2172,10 @@ public class ApiDBUtils {
return s_dcJoinDao.newDataCenterResponse(view, dc, showCapacities, showResourceImage);
}
public static ZoneResponse newMinimalDataCenterResponse(ResponseView view, DataCenterJoinVO dc) {
return s_dcJoinDao.newMinimalDataCenterResponse(view, dc);
}
public static DataCenterJoinVO newDataCenterView(DataCenter dc) {
return s_dcJoinDao.newDataCenterView(dc);
}

View File

@ -1301,6 +1301,15 @@ public class ApiResponseHelper implements ResponseGenerator {
return response;
}
@Override
public PodResponse createMinimalPodResponse(Pod pod) {
PodResponse podResponse = new PodResponse();
podResponse.setId(pod.getUuid());
podResponse.setName(pod.getName());
podResponse.setObjectName("pod");
return podResponse;
}
@Override
public PodResponse createPodResponse(Pod pod, Boolean showCapacities) {
String[] ipRange = new String[2];
@ -1344,7 +1353,7 @@ public class ApiResponseHelper implements ResponseGenerator {
PodResponse podResponse = new PodResponse();
podResponse.setId(pod.getUuid());
podResponse.setName(pod.getName());
DataCenter zone = ApiDBUtils.findZoneById(pod.getDataCenterId());
DataCenterVO zone = ApiDBUtils.findZoneById(pod.getDataCenterId());
if (zone != null) {
podResponse.setZoneId(zone.getUuid());
podResponse.setZoneName(zone.getName());
@ -1357,6 +1366,8 @@ public class ApiResponseHelper implements ResponseGenerator {
podResponse.setVlanId(vlanIds);
podResponse.setGateway(pod.getGateway());
podResponse.setAllocationState(pod.getAllocationState().toString());
podResponse.setStorageAccessGroups(pod.getStorageAccessGroups());
podResponse.setZoneStorageAccessGroups(zone.getStorageAccessGroups());
if (showCapacities != null && showCapacities) {
List<SummedCapacity> capacities = ApiDBUtils.getCapacityByClusterPodZone(null, pod.getId(), null);
Set<CapacityResponse> capacityResponses = new HashSet<CapacityResponse>();
@ -1506,6 +1517,15 @@ public class ApiResponseHelper implements ResponseGenerator {
return listPools.get(0);
}
@Override
public ClusterResponse createMinimalClusterResponse(Cluster cluster) {
ClusterResponse clusterResponse = new ClusterResponse();
clusterResponse.setId(cluster.getUuid());
clusterResponse.setName(cluster.getName());
clusterResponse.setObjectName("cluster");
return clusterResponse;
}
@Override
public ClusterResponse createClusterResponse(Cluster cluster, Boolean showCapacities) {
ClusterResponse clusterResponse = new ClusterResponse();
@ -1516,7 +1536,7 @@ public class ApiResponseHelper implements ResponseGenerator {
clusterResponse.setPodId(pod.getUuid());
clusterResponse.setPodName(pod.getName());
}
DataCenter dc = ApiDBUtils.findZoneById(cluster.getDataCenterId());
DataCenterVO dc = ApiDBUtils.findZoneById(cluster.getDataCenterId());
if (dc != null) {
clusterResponse.setZoneId(dc.getUuid());
clusterResponse.setZoneName(dc.getName());
@ -1534,6 +1554,10 @@ public class ApiResponseHelper implements ResponseGenerator {
clusterResponse.setArch(cluster.getArch().getType());
}
clusterResponse.setStorageAccessGroups(cluster.getStorageAccessGroups());
clusterResponse.setPodStorageAccessGroups(pod.getStorageAccessGroups());
clusterResponse.setZoneStorageAccessGroups(dc.getStorageAccessGroups());
if (showCapacities != null && showCapacities) {
List<SummedCapacity> capacities = ApiDBUtils.getCapacityByClusterPodZone(null, null, cluster.getId());
Set<CapacityResponse> capacityResponses = new HashSet<CapacityResponse>();
@ -5267,6 +5291,8 @@ public class ApiResponseHelper implements ResponseGenerator {
response.setMemory(instance.getMemory());
response.setOperatingSystemId(instance.getOperatingSystemId());
response.setOperatingSystem(instance.getOperatingSystem());
response.setBootMode(instance.getBootMode());
response.setBootType(instance.getBootType());
response.setObjectName("unmanagedinstance");
if (instance.getDisks() != null) {

View File

@ -36,6 +36,12 @@ import java.util.stream.Stream;
import javax.inject.Inject;
import com.cloud.dc.Pod;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.HostPodDao;
import com.cloud.org.Cluster;
import com.cloud.server.ManagementService;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import org.apache.cloudstack.acl.ControlledEntity;
import org.apache.cloudstack.acl.ControlledEntity.ACLType;
import org.apache.cloudstack.acl.SecurityChecker;
@ -52,6 +58,7 @@ import org.apache.cloudstack.api.ResourceDetail;
import org.apache.cloudstack.api.ResponseGenerator;
import org.apache.cloudstack.api.ResponseObject.ResponseView;
import org.apache.cloudstack.api.command.admin.account.ListAccountsCmdByAdmin;
import org.apache.cloudstack.api.command.admin.cluster.ListClustersCmd;
import org.apache.cloudstack.api.command.admin.domain.ListDomainsCmd;
import org.apache.cloudstack.api.command.admin.domain.ListDomainsCmdByAdmin;
import org.apache.cloudstack.api.command.admin.host.ListHostTagsCmd;
@ -59,6 +66,7 @@ import org.apache.cloudstack.api.command.admin.host.ListHostsCmd;
import org.apache.cloudstack.api.command.admin.internallb.ListInternalLBVMsCmd;
import org.apache.cloudstack.api.command.admin.iso.ListIsosCmdByAdmin;
import org.apache.cloudstack.api.command.admin.management.ListMgmtsCmd;
import org.apache.cloudstack.api.command.admin.pod.ListPodsByCmd;
import org.apache.cloudstack.api.command.admin.resource.icon.ListResourceIconCmd;
import org.apache.cloudstack.api.command.admin.router.GetRouterHealthCheckResultsCmd;
import org.apache.cloudstack.api.command.admin.router.ListRoutersCmd;
@ -66,6 +74,7 @@ import org.apache.cloudstack.api.command.admin.snapshot.ListSnapshotsCmdByAdmin;
import org.apache.cloudstack.api.command.admin.storage.ListImageStoresCmd;
import org.apache.cloudstack.api.command.admin.storage.ListObjectStoragePoolsCmd;
import org.apache.cloudstack.api.command.admin.storage.ListSecondaryStagingStoresCmd;
import org.apache.cloudstack.api.command.admin.storage.ListStorageAccessGroupsCmd;
import org.apache.cloudstack.api.command.admin.storage.ListStoragePoolsCmd;
import org.apache.cloudstack.api.command.admin.storage.ListStorageTagsCmd;
import org.apache.cloudstack.api.command.admin.storage.heuristics.ListSecondaryStorageSelectorsCmd;
@ -100,6 +109,7 @@ import org.apache.cloudstack.api.command.user.zone.ListZonesCmd;
import org.apache.cloudstack.api.response.AccountResponse;
import org.apache.cloudstack.api.response.AsyncJobResponse;
import org.apache.cloudstack.api.response.BucketResponse;
import org.apache.cloudstack.api.response.ClusterResponse;
import org.apache.cloudstack.api.response.DetailOptionsResponse;
import org.apache.cloudstack.api.response.DiskOfferingResponse;
import org.apache.cloudstack.api.response.DomainResponse;
@ -114,6 +124,7 @@ import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.api.response.ManagementServerResponse;
import org.apache.cloudstack.api.response.ObjectStoreResponse;
import org.apache.cloudstack.api.response.PeerManagementServerNodeResponse;
import org.apache.cloudstack.api.response.PodResponse;
import org.apache.cloudstack.api.response.ProjectAccountResponse;
import org.apache.cloudstack.api.response.ProjectInvitationResponse;
import org.apache.cloudstack.api.response.ProjectResponse;
@ -125,6 +136,7 @@ import org.apache.cloudstack.api.response.SecondaryStorageHeuristicsResponse;
import org.apache.cloudstack.api.response.SecurityGroupResponse;
import org.apache.cloudstack.api.response.ServiceOfferingResponse;
import org.apache.cloudstack.api.response.SnapshotResponse;
import org.apache.cloudstack.api.response.StorageAccessGroupResponse;
import org.apache.cloudstack.api.response.StoragePoolResponse;
import org.apache.cloudstack.api.response.StorageTagResponse;
import org.apache.cloudstack.api.response.TemplateResponse;
@ -618,6 +630,18 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
@Inject
private AsyncJobManager jobManager;
@Inject
private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao;
@Inject
public ManagementService managementService;
@Inject
DataCenterDao dataCenterDao;
@Inject
HostPodDao podDao;
private SearchCriteria<ServiceOfferingJoinVO> getMinimumCpuServiceOfferingJoinSearchCriteria(int cpu) {
SearchCriteria<ServiceOfferingJoinVO> sc = _srvOfferingJoinDao.createSearchCriteria();
SearchCriteria<ServiceOfferingJoinVO> sc1 = _srvOfferingJoinDao.createSearchCriteria();
@ -2342,6 +2366,16 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
return response;
}
private ListResponse<HostResponse> searchForServersWithMinimalResponse(ListHostsCmd cmd) {
>>Search">
logger.debug("Searching for hosts");
Pair<List<HostJoinVO>, Integer> hosts = searchForServersInternal(cmd);
ListResponse<HostResponse> response = new ListResponse<HostResponse>();
>>Gener">
logger.debug("Generating minimal host responses");
List<HostResponse> hostResponses = ViewResponseHelper.createMinimalHostResponse(hosts.first().toArray(new HostJoinVO[hosts.first().size()]));
response.setResponses(hostResponses, hosts.second());
return response;
}
public Pair<List<HostJoinVO>, Integer> searchForServersInternal(ListHostsCmd cmd) {
Pair<List<Long>, Integer> serverIdPage = searchForServerIdsAndCount(cmd);
@ -2373,6 +2407,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
Hypervisor.HypervisorType hypervisorType = cmd.getHypervisor();
Long msId = cmd.getManagementServerId();
final CPU.CPUArch arch = cmd.getArch();
String storageAccessGroup = cmd.getStorageAccessGroup();
Filter searchFilter = new Filter(HostVO.class, "id", Boolean.TRUE, startIndex, pageSize);
@ -2390,6 +2425,13 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
hostSearchBuilder.and("hypervisor_type", hostSearchBuilder.entity().getHypervisorType(), SearchCriteria.Op.EQ);
hostSearchBuilder.and("mgmt_server_id", hostSearchBuilder.entity().getManagementServerId(), SearchCriteria.Op.EQ);
hostSearchBuilder.and("arch", hostSearchBuilder.entity().getArch(), SearchCriteria.Op.EQ);
if (storageAccessGroup != null) {
hostSearchBuilder.and().op("storageAccessGroupExact", hostSearchBuilder.entity().getStorageAccessGroups(), Op.EQ);
hostSearchBuilder.or("storageAccessGroupPrefix", hostSearchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
hostSearchBuilder.or("storageAccessGroupSuffix", hostSearchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
hostSearchBuilder.or("storageAccessGroupMiddle", hostSearchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
hostSearchBuilder.cp();
}
if (keyword != null) {
hostSearchBuilder.and().op("keywordName", hostSearchBuilder.entity().getName(), SearchCriteria.Op.LIKE);
@ -2481,6 +2523,13 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
sc.setParameters("arch", arch);
}
if (storageAccessGroup != null) {
sc.setParameters("storageAccessGroupExact", storageAccessGroup);
sc.setParameters("storageAccessGroupPrefix", storageAccessGroup + ",%");
sc.setParameters("storageAccessGroupSuffix", "%," + storageAccessGroup);
sc.setParameters("storageAccessGroupMiddle", "%," + storageAccessGroup + ",%");
}
Pair<List<HostVO>, Integer> uniqueHostPair = hostDao.searchAndCount(sc, searchFilter);
Integer count = uniqueHostPair.second();
List<Long> hostIds = uniqueHostPair.first().stream().map(HostVO::getId).collect(Collectors.toList());
@ -3204,7 +3253,14 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
poolResponse.setCaps(caps);
}
private ListResponse<StoragePoolResponse> searchForStoragePoolsWithMinimalResponse(ListStoragePoolsCmd cmd) {
Pair<List<StoragePoolJoinVO>, Integer> result = searchForStoragePoolsInternal(cmd);
ListResponse<StoragePoolResponse> response = new ListResponse<>();
List<StoragePoolResponse> poolResponses = ViewResponseHelper.createMinimalStoragePoolResponse(result.first().toArray(new StoragePoolJoinVO[result.first().size()]));
response.setResponses(poolResponses, result.second());
return response;
}
private Pair<List<StoragePoolJoinVO>, Integer> searchForStoragePoolsInternal(ListStoragePoolsCmd cmd) {
ScopeType scopeType = ScopeType.validateAndGetScopeType(cmd.getScope());
@ -3216,16 +3272,18 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
String path = cmd.getPath();
Long pod = cmd.getPodId();
Long cluster = cmd.getClusterId();
Long host = cmd.getHostId();
String address = cmd.getIpAddress();
String keyword = cmd.getKeyword();
Long startIndex = cmd.getStartIndex();
Long pageSize = cmd.getPageSizeVal();
String storageAccessGroup = cmd.getStorageAccessGroup();
Filter searchFilter = new Filter(StoragePoolVO.class, "id", Boolean.TRUE, startIndex, pageSize);
Pair<List<Long>, Integer> uniquePoolPair = storagePoolDao.searchForIdsAndCount(id, name, zoneId, path, pod,
cluster, address, scopeType, status, keyword, searchFilter);
cluster, host, address, scopeType, status, keyword, storageAccessGroup, searchFilter);
List<StoragePoolJoinVO> storagePools = _poolJoinDao.searchByIds(uniquePoolPair.first().toArray(new Long[0]));
@ -3243,6 +3301,99 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
return response;
}
@Override
public ListResponse<StorageAccessGroupResponse> searchForStorageAccessGroups(ListStorageAccessGroupsCmd cmd) {
String name = cmd.getName();
String keyword = cmd.getKeyword();
Set<String> storageAccessGroups = new HashSet<>();
addStorageAccessGroups(storageAccessGroups, storagePoolAndAccessGroupMapDao.listDistinctStorageAccessGroups(name, keyword));
addStorageAccessGroups(storageAccessGroups, hostDao.listDistinctStorageAccessGroups(name, keyword));
addStorageAccessGroups(storageAccessGroups, clusterDao.listDistinctStorageAccessGroups(name, keyword));
addStorageAccessGroups(storageAccessGroups, podDao.listDistinctStorageAccessGroups(name, keyword));
addStorageAccessGroups(storageAccessGroups, dataCenterDao.listDistinctStorageAccessGroups(name, keyword));
if (StringUtils.isNotEmpty(name) && storageAccessGroups.contains(name)) {
storageAccessGroups = Collections.singleton(name);
}
if (StringUtils.isNotEmpty(keyword)) {
storageAccessGroups = storageAccessGroups.stream()
.filter(group -> group.contains(keyword))
.collect(Collectors.toSet());
}
List<StorageAccessGroupResponse> responseList = buildStorageAccessGroupResponses(storageAccessGroups, name);
ListResponse<StorageAccessGroupResponse> response = new ListResponse<>();
response.setResponses(responseList, storageAccessGroups.size());
return response;
}
private void addStorageAccessGroups(Set<String> storageAccessGroups, List<String> groups) {
for (String group : groups) {
if (group != null && !group.isEmpty()) {
storageAccessGroups.addAll(Arrays.asList(group.split(",")));
}
}
}
private List<StorageAccessGroupResponse> buildStorageAccessGroupResponses(
Set<String> storageAccessGroups, String name) {
List<StorageAccessGroupResponse> responseList = new ArrayList<>();
for (String sag : storageAccessGroups) {
StorageAccessGroupResponse sagResponse = new StorageAccessGroupResponse();
sagResponse.setName(sag);
sagResponse.setObjectName(ApiConstants.STORAGE_ACCESS_GROUP);
if (StringUtils.isNotBlank(name)) {
fetchStorageAccessGroupResponse(sagResponse, name);
}
responseList.add(sagResponse);
}
return responseList;
}
private void fetchStorageAccessGroupResponse(StorageAccessGroupResponse sagResponse, String name) {
sagResponse.setHostResponseList(searchForServersWithMinimalResponse(new ListHostsCmd(name)));
sagResponse.setZoneResponseList(listDataCentersWithMinimalResponse(new ListZonesCmd(name)));
sagResponse.setPodResponseList(fetchPodsByStorageAccessGroup(name));
sagResponse.setClusterResponseList(fetchClustersByStorageAccessGroup(name));
sagResponse.setStoragePoolResponseList(searchForStoragePoolsWithMinimalResponse(new ListStoragePoolsCmd(name)));
}
private ListResponse<PodResponse> fetchPodsByStorageAccessGroup(String name) {
ListPodsByCmd listPodsByCmd = new ListPodsByCmd(name);
Pair<List<? extends Pod>, Integer> podResponsePair = managementService.searchForPods(listPodsByCmd);
List<PodResponse> podResponses = podResponsePair.first().stream()
.map(pod -> {
PodResponse podResponse = responseGenerator.createMinimalPodResponse(pod);
podResponse.setObjectName("pod");
return podResponse;
}).collect(Collectors.toList());
ListResponse<PodResponse> podResponse = new ListResponse<>();
podResponse.setResponses(podResponses, podResponsePair.second());
return podResponse;
}
private ListResponse<ClusterResponse> fetchClustersByStorageAccessGroup(String name) {
ListClustersCmd listClustersCmd = new ListClustersCmd(name);
Pair<List<? extends Cluster>, Integer> clusterResponsePair = managementService.searchForClusters(listClustersCmd);
List<ClusterResponse> clusterResponses = clusterResponsePair.first().stream()
.map(cluster -> {
ClusterResponse clusterResponse = responseGenerator.createMinimalClusterResponse(cluster);
clusterResponse.setObjectName("cluster");
return clusterResponse;
}).collect(Collectors.toList());
ListResponse<ClusterResponse> clusterResponse = new ListResponse<>();
clusterResponse.setResponses(clusterResponses, clusterResponsePair.second());
return clusterResponse;
}
private Pair<List<StoragePoolTagVO>, Integer> searchForStorageTagsInternal(ListStorageTagsCmd cmd) {
Filter searchFilter = new Filter(StoragePoolTagVO.class, "id", Boolean.TRUE, null, null);
@ -4309,6 +4460,20 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
return response;
}
private ListResponse<ZoneResponse> listDataCentersWithMinimalResponse(ListZonesCmd cmd) {
Pair<List<DataCenterJoinVO>, Integer> result = listDataCentersInternal(cmd);
ListResponse<ZoneResponse> response = new ListResponse<ZoneResponse>();
ResponseView respView = ResponseView.Restricted;
if (cmd instanceof ListZonesCmdByAdmin || CallContext.current().getCallingAccount().getType() == Account.Type.ADMIN) {
respView = ResponseView.Full;
}
List<ZoneResponse> dcResponses = ViewResponseHelper.createMinimalDataCenterResponse(respView, result.first().toArray(new DataCenterJoinVO[result.first().size()]));
response.setResponses(dcResponses, result.second());
return response;
}
private Pair<List<DataCenterJoinVO>, Integer> listDataCentersInternal(ListZonesCmd cmd) {
Account account = CallContext.current().getCallingAccount();
Long domainId = cmd.getDomainId();
@ -4318,6 +4483,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
String name = cmd.getName();
String networkType = cmd.getNetworkType();
Map<String, String> resourceTags = cmd.getTags();
String storageAccessGroup = cmd.getStorageAccessGroup();
SearchBuilder<DataCenterJoinVO> sb = _dcJoinDao.createSearchBuilder();
if (resourceTags != null && !resourceTags.isEmpty()) {
@ -4331,6 +4497,13 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
sb.groupBy(sb.entity().getId());
sb.join("tagSearch", tagSearch, sb.entity().getId(), tagSearch.entity().getResourceId(), JoinBuilder.JoinType.INNER);
}
if (storageAccessGroup != null) {
sb.and().op("storageAccessGroupExact", sb.entity().getStorageAccessGroups(), Op.EQ);
sb.or("storageAccessGroupPrefix", sb.entity().getStorageAccessGroups(), Op.LIKE);
sb.or("storageAccessGroupSuffix", sb.entity().getStorageAccessGroups(), Op.LIKE);
sb.or("storageAccessGroupMiddle", sb.entity().getStorageAccessGroups(), Op.LIKE);
sb.cp();
}
Filter searchFilter = new Filter(DataCenterJoinVO.class, "sortKey", SortKeyAscending.value(), cmd.getStartIndex(), cmd.getPageSizeVal());
searchFilter.addOrderBy(DataCenterJoinVO.class, "id", true);
@ -4492,6 +4665,13 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
}
}
if (storageAccessGroup != null) {
sc.setParameters("storageAccessGroupExact", storageAccessGroup);
sc.setParameters("storageAccessGroupPrefix", storageAccessGroup + ",%");
sc.setParameters("storageAccessGroupSuffix", "%," + storageAccessGroup);
sc.setParameters("storageAccessGroupMiddle", "%," + storageAccessGroup + ",%");
}
return _dcJoinDao.searchAndCount(sc, searchFilter);
}

Some files were not shown because too many files have changed in this diff