mirror of https://github.com/apache/cloudstack.git
Merge branch 'master' into ui-vpc-redesign
This commit is contained in:
commit
7167bf4e67
|
|
@ -0,0 +1,45 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.deploy;

import java.util.List;

import com.cloud.exception.InsufficientServerCapacityException;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;

/**
 * A {@link DeploymentPlanner} specialization that, instead of producing a
 * final destination directly, ranks the candidate clusters a virtual machine
 * could be deployed into.
 */
public interface DeploymentClusterPlanner extends DeploymentPlanner {
    /**
     * This is called to determine list of possible clusters where a virtual
     * machine can be deployed.
     *
     * @param vm
     *            virtual machine.
     * @param plan
     *            deployment plan that tells you where it's being deployed to.
     * @param avoid
     *            avoid these data centers, pods, clusters, or hosts.
     * @return ordered list of cluster ids to consider for the virtual machine.
     * @throws InsufficientServerCapacityException
     *             if no cluster has enough capacity for the VM.
     */
    List<Long> orderClusters(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan, ExcludeList avoid)
            throws InsufficientServerCapacityException;

    /**
     * Returns the resource-usage mode (Shared or Dedicated) this planner
     * plans for; declared in {@code DeploymentPlanner.PlannerResourceUsage}.
     */
    PlannerResourceUsage getResourceUsage();
}
|
||||
|
|
@ -35,6 +35,7 @@ import com.cloud.vm.VirtualMachineProfile;
|
|||
/**
|
||||
*/
|
||||
public interface DeploymentPlanner extends Adapter {
|
||||
|
||||
/**
|
||||
* plan is called to determine where a virtual machine should be running.
|
||||
*
|
||||
|
|
@ -46,6 +47,7 @@ public interface DeploymentPlanner extends Adapter {
|
|||
* avoid these data centers, pods, clusters, or hosts.
|
||||
* @return DeployDestination for that virtual machine.
|
||||
*/
|
||||
@Deprecated
|
||||
DeployDestination plan(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan, ExcludeList avoid) throws InsufficientServerCapacityException;
|
||||
|
||||
/**
|
||||
|
|
@ -88,6 +90,10 @@ public interface DeploymentPlanner extends Adapter {
|
|||
userconcentratedpod_firstfit;
|
||||
}
|
||||
|
||||
public enum PlannerResourceUsage {
|
||||
Shared, Dedicated;
|
||||
}
|
||||
|
||||
public static class ExcludeList {
|
||||
private Set<Long> _dcIds;
|
||||
private Set<Long> _podIds;
|
||||
|
|
@ -99,10 +105,22 @@ public interface DeploymentPlanner extends Adapter {
|
|||
}
|
||||
|
||||
public ExcludeList(Set<Long> _dcIds, Set<Long> _podIds, Set<Long> _clusterIds, Set<Long> _hostIds, Set<Long> _poolIds) {
|
||||
this._dcIds = _dcIds;
|
||||
this._podIds = _podIds;
|
||||
this._clusterIds = _clusterIds;
|
||||
this._poolIds = _poolIds;
|
||||
if (_dcIds != null) {
|
||||
this._dcIds = new HashSet<Long>(_dcIds);
|
||||
}
|
||||
if (_podIds != null) {
|
||||
this._podIds = new HashSet<Long>(_podIds);
|
||||
}
|
||||
if (_clusterIds != null) {
|
||||
this._clusterIds = new HashSet<Long>(_clusterIds);
|
||||
}
|
||||
|
||||
if (_hostIds != null) {
|
||||
this._hostIds = new HashSet<Long>(_hostIds);
|
||||
}
|
||||
if (_poolIds != null) {
|
||||
this._poolIds = new HashSet<Long>(_poolIds);
|
||||
}
|
||||
}
|
||||
|
||||
public boolean add(InsufficientCapacityException e) {
|
||||
|
|
@ -195,6 +213,13 @@ public interface DeploymentPlanner extends Adapter {
|
|||
_hostIds.add(hostId);
|
||||
}
|
||||
|
||||
public void addHostList(Collection<Long> hostList) {
|
||||
if (_hostIds == null) {
|
||||
_hostIds = new HashSet<Long>();
|
||||
}
|
||||
_hostIds.addAll(hostList);
|
||||
}
|
||||
|
||||
public boolean shouldAvoid(Host host) {
|
||||
if (_dcIds != null && _dcIds.contains(host.getDataCenterId())) {
|
||||
return true;
|
||||
|
|
|
|||
|
|
@ -92,6 +92,8 @@ public class EventTypes {
|
|||
public static final String EVENT_PROXY_STOP = "PROXY.STOP";
|
||||
public static final String EVENT_PROXY_REBOOT = "PROXY.REBOOT";
|
||||
public static final String EVENT_PROXY_HA = "PROXY.HA";
|
||||
public static final String EVENT_PROXY_SCALE = "PROXY.SCALE";
|
||||
|
||||
|
||||
// VNC Console Events
|
||||
public static final String EVENT_VNC_CONNECT = "VNC.CONNECT";
|
||||
|
|
@ -213,6 +215,7 @@ public class EventTypes {
|
|||
public static final String EVENT_SSVM_STOP = "SSVM.STOP";
|
||||
public static final String EVENT_SSVM_REBOOT = "SSVM.REBOOT";
|
||||
public static final String EVENT_SSVM_HA = "SSVM.HA";
|
||||
public static final String EVENT_SSVM_SCALE = "SSVM.SCALE";
|
||||
|
||||
// Service Offerings
|
||||
public static final String EVENT_SERVICE_OFFERING_CREATE = "SERVICE.OFFERING.CREATE";
|
||||
|
|
@ -423,6 +426,7 @@ public class EventTypes {
|
|||
public static final String EVENT_INTERNAL_LB_VM_START = "INTERNALLBVM.START";
|
||||
public static final String EVENT_INTERNAL_LB_VM_STOP = "INTERNALLBVM.STOP";
|
||||
|
||||
public static final String EVENT_HOST_RESERVATION_RELEASE = "HOST.RESERVATION.RELEASE";
|
||||
// Dedicated guest vlan range
|
||||
public static final String EVENT_GUEST_VLAN_RANGE_DEDICATE = "GUESTVLANRANGE.DEDICATE";
|
||||
public static final String EVENT_DEDICATED_GUEST_VLAN_RANGE_RELEASE = "GUESTVLANRANGE.RELEASE";
|
||||
|
|
@ -728,7 +732,6 @@ public class EventTypes {
|
|||
entityEventDetails.put(EVENT_AUTOSCALEVMGROUP_UPDATE, AutoScaleVmGroup.class.getName());
|
||||
entityEventDetails.put(EVENT_AUTOSCALEVMGROUP_ENABLE, AutoScaleVmGroup.class.getName());
|
||||
entityEventDetails.put(EVENT_AUTOSCALEVMGROUP_DISABLE, AutoScaleVmGroup.class.getName());
|
||||
|
||||
entityEventDetails.put(EVENT_GUEST_VLAN_RANGE_DEDICATE, GuestVlan.class.getName());
|
||||
entityEventDetails.put(EVENT_DEDICATED_GUEST_VLAN_RANGE_RELEASE, GuestVlan.class.getName());
|
||||
}
|
||||
|
|
|
|||
|
|
@ -108,4 +108,6 @@ public interface ServiceOffering extends InfrastructureEntity, InternalIdentity,
|
|||
boolean getDefaultUse();
|
||||
|
||||
String getSystemVmType();
|
||||
|
||||
String getDeploymentPlanner();
|
||||
}
|
||||
|
|
|
|||
|
|
@ -100,11 +100,13 @@ public interface ResourceService {
|
|||
Swift discoverSwift(AddSwiftCmd addSwiftCmd) throws DiscoveryException;
|
||||
|
||||
S3 discoverS3(AddS3Cmd cmd) throws DiscoveryException;
|
||||
|
||||
|
||||
List<HypervisorType> getSupportedHypervisorTypes(long zoneId, boolean forVirtualRouter, Long podId);
|
||||
|
||||
Pair<List<? extends Swift>, Integer> listSwifts(ListSwiftsCmd cmd);
|
||||
|
||||
List<? extends S3> listS3s(ListS3sCmd cmd);
|
||||
|
||||
boolean releaseHostReservation(Long hostId);
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -22,6 +22,7 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import com.cloud.exception.*;
|
||||
import org.apache.cloudstack.api.ServerApiException;
|
||||
import org.apache.cloudstack.api.command.admin.cluster.ListClustersCmd;
|
||||
import org.apache.cloudstack.api.command.admin.config.ListCfgsByCmd;
|
||||
|
|
@ -34,11 +35,7 @@ import org.apache.cloudstack.api.command.admin.resource.DeleteAlertsCmd;
|
|||
import org.apache.cloudstack.api.command.admin.resource.ListAlertsCmd;
|
||||
import org.apache.cloudstack.api.command.admin.resource.ListCapacityCmd;
|
||||
import org.apache.cloudstack.api.command.admin.resource.UploadCustomCertificateCmd;
|
||||
import org.apache.cloudstack.api.command.admin.systemvm.DestroySystemVmCmd;
|
||||
import org.apache.cloudstack.api.command.admin.systemvm.ListSystemVMsCmd;
|
||||
import org.apache.cloudstack.api.command.admin.systemvm.RebootSystemVmCmd;
|
||||
import org.apache.cloudstack.api.command.admin.systemvm.StopSystemVmCmd;
|
||||
import org.apache.cloudstack.api.command.admin.systemvm.UpgradeSystemVMCmd;
|
||||
import org.apache.cloudstack.api.command.admin.systemvm.*;
|
||||
import org.apache.cloudstack.api.command.admin.vlan.ListVlanIpRangesCmd;
|
||||
import org.apache.cloudstack.api.command.user.address.ListPublicIpAddressesCmd;
|
||||
import org.apache.cloudstack.api.command.user.config.ListCapabilitiesCmd;
|
||||
|
|
@ -64,10 +61,6 @@ import com.cloud.configuration.Configuration;
|
|||
import com.cloud.dc.Pod;
|
||||
import com.cloud.dc.Vlan;
|
||||
import com.cloud.domain.Domain;
|
||||
import com.cloud.exception.ConcurrentOperationException;
|
||||
import com.cloud.exception.InternalErrorException;
|
||||
import com.cloud.exception.PermissionDeniedException;
|
||||
import com.cloud.exception.ResourceUnavailableException;
|
||||
import com.cloud.host.Host;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.hypervisor.HypervisorCapabilities;
|
||||
|
|
@ -419,5 +412,8 @@ public interface ManagementService {
|
|||
* @return List of capacities
|
||||
*/
|
||||
List<? extends Capacity> listTopConsumedResources(ListCapacityCmd cmd);
|
||||
|
||||
List<String> listDeploymentPlanners();
|
||||
|
||||
VirtualMachine upgradeSystemVM(ScaleSystemVMCmd cmd) throws ResourceUnavailableException, ManagementServerException, VirtualMachineMigrationException, ConcurrentOperationException;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -461,6 +461,6 @@ public interface UserVmService {
|
|||
|
||||
UserVm restoreVM(RestoreVMCmd cmd) throws InsufficientCapacityException, ResourceUnavailableException;
|
||||
|
||||
boolean upgradeVirtualMachine(ScaleVMCmd scaleVMCmd) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException;
|
||||
UserVm upgradeVirtualMachine(ScaleVMCmd cmd) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException;
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -312,6 +312,7 @@ public class ApiConstants {
|
|||
public static final String ACCEPT = "accept";
|
||||
public static final String SORT_KEY = "sortkey";
|
||||
public static final String ACCOUNT_DETAILS = "accountdetails";
|
||||
public static final String SERVICE_OFFERING_DETAILS = "serviceofferingdetails";
|
||||
public static final String SERVICE_PROVIDER_LIST = "serviceproviderlist";
|
||||
public static final String SERVICE_CAPABILITY_LIST = "servicecapabilitylist";
|
||||
public static final String CAN_CHOOSE_SERVICE_CAPABILITY = "canchooseservicecapability";
|
||||
|
|
@ -496,6 +497,7 @@ public class ApiConstants {
|
|||
public static final String AFFINITY_GROUP_NAMES = "affinitygroupnames";
|
||||
public static final String ASA_INSIDE_PORT_PROFILE = "insideportprofile";
|
||||
public static final String AFFINITY_GROUP_ID = "affinitygroupid";
|
||||
public static final String DEPLOYMENT_PLANNER = "deploymentplanner";
|
||||
public static final String ACL_ID = "aclid";
|
||||
public static final String NUMBER = "number";
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,71 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.admin.config;

import java.util.ArrayList;
import java.util.List;

import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.BaseListCmd;
import org.apache.cloudstack.api.response.DeploymentPlannersResponse;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.log4j.Logger;

/**
 * API command that lists the names of all deployment planners known to the
 * management server. Takes no parameters; the heavy lifting is delegated to
 * {@code _mgr.listDeploymentPlanners()}.
 */
@APICommand(name = "listDeploymentPlanners", description = "Lists all DeploymentPlanners available.", responseObject = DeploymentPlannersResponse.class)
public class ListDeploymentPlannersCmd extends BaseListCmd {
    public static final Logger s_logger = Logger.getLogger(ListDeploymentPlannersCmd.class.getName());

    private static final String s_name = "listdeploymentplannersresponse";

    /////////////////////////////////////////////////////
    //////////////// API parameters /////////////////////
    /////////////////////////////////////////////////////

    // This command accepts no parameters of its own.

    /////////////////////////////////////////////////////
    /////////////////// Accessors ///////////////////////
    /////////////////////////////////////////////////////

    /////////////////////////////////////////////////////
    /////////////// API Implementation///////////////////
    /////////////////////////////////////////////////////

    @Override
    public String getCommandName() {
        return s_name;
    }

    @Override
    public void execute() {
        ListResponse<DeploymentPlannersResponse> listResponse = new ListResponse<DeploymentPlannersResponse>();
        List<DeploymentPlannersResponse> plannerResponses = new ArrayList<DeploymentPlannersResponse>();

        // Wrap each planner name in its own response entry.
        for (String plannerName : _mgr.listDeploymentPlanners()) {
            DeploymentPlannersResponse entry = new DeploymentPlannersResponse();
            entry.setName(plannerName);
            entry.setObjectName("deploymentPlanner");
            plannerResponses.add(entry);
        }

        listResponse.setResponses(plannerResponses);
        listResponse.setResponseName(getCommandName());
        this.setResponseObject(listResponse);
    }
}
|
||||
|
|
@ -45,7 +45,7 @@ public class FindHostsForMigrationCmd extends BaseListCmd {
|
|||
/////////////////////////////////////////////////////
|
||||
|
||||
@Parameter(name=ApiConstants.VIRTUAL_MACHINE_ID, type=CommandType.UUID, entityType = UserVmResponse.class,
|
||||
required=false, description="find hosts to which this VM can be migrated and flag the hosts with enough " +
|
||||
required=true, description="find hosts to which this VM can be migrated and flag the hosts with enough " +
|
||||
"CPU/RAM to host the VM")
|
||||
private Long virtualMachineId;
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,105 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.api.command.admin.host;
|
||||
|
||||
import org.apache.cloudstack.api.APICommand;
|
||||
import org.apache.cloudstack.api.ApiConstants;
|
||||
import org.apache.cloudstack.api.ApiErrorCode;
|
||||
import org.apache.cloudstack.api.BaseAsyncCmd;
|
||||
import org.apache.cloudstack.api.Parameter;
|
||||
import org.apache.cloudstack.api.ServerApiException;
|
||||
import org.apache.cloudstack.api.response.HostResponse;
|
||||
import org.apache.cloudstack.api.response.SuccessResponse;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.async.AsyncJob;
|
||||
import com.cloud.event.EventTypes;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.user.UserContext;
|
||||
|
||||
@APICommand(name = "releaseHostReservation", description = "Releases host reservation.", responseObject = SuccessResponse.class)
|
||||
public class ReleaseHostReservationCmd extends BaseAsyncCmd {
|
||||
public static final Logger s_logger = Logger.getLogger(ReleaseHostReservationCmd.class.getName());
|
||||
|
||||
private static final String s_name = "releasehostreservationresponse";
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
//////////////// API parameters /////////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
||||
@Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType=HostResponse.class,
|
||||
required=true, description="the host ID")
|
||||
private Long id;
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////////// Accessors ///////////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
||||
public Long getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////// API Implementation///////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
||||
@Override
|
||||
public String getCommandName() {
|
||||
return s_name;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getEntityOwnerId() {
|
||||
Account account = UserContext.current().getCaller();
|
||||
if (account != null) {
|
||||
return account.getId();
|
||||
}
|
||||
|
||||
return Account.ACCOUNT_ID_SYSTEM;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getEventType() {
|
||||
return EventTypes.EVENT_HOST_RESERVATION_RELEASE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getEventDescription() {
|
||||
return "releasing reservation for host: " + getId();
|
||||
}
|
||||
|
||||
@Override
|
||||
public AsyncJob.Type getInstanceType() {
|
||||
return AsyncJob.Type.Host;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Long getInstanceId() {
|
||||
return getId();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void execute(){
|
||||
boolean result = _resourceService.releaseHostReservation(getId());
|
||||
if (result) {
|
||||
SuccessResponse response = new SuccessResponse(getCommandName());
|
||||
this.setResponseObject(response);
|
||||
} else {
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to release host reservation");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -44,7 +44,7 @@ public class ListNetworkIsolationMethodsCmd extends BaseListCmd{
|
|||
isolationResponses.add(isolationMethod);
|
||||
}
|
||||
}
|
||||
response.setResponses(isolationResponses, methods.length);
|
||||
response.setResponses(isolationResponses, isolationResponses.size());
|
||||
response.setResponseName(getCommandName());
|
||||
this.setResponseObject(response);
|
||||
|
||||
|
|
|
|||
|
|
@ -16,6 +16,9 @@
|
|||
// under the License.
|
||||
package org.apache.cloudstack.api.command.admin.offering;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.cloudstack.api.APICommand;
|
||||
import org.apache.cloudstack.api.ApiConstants;
|
||||
import org.apache.cloudstack.api.ApiErrorCode;
|
||||
|
|
@ -84,6 +87,12 @@ public class CreateServiceOfferingCmd extends BaseCmd {
|
|||
@Parameter(name=ApiConstants.NETWORKRATE, type=CommandType.INTEGER, description="data transfer rate in megabits per second allowed. Supported only for non-System offering and system offerings having \"domainrouter\" systemvmtype")
|
||||
private Integer networkRate;
|
||||
|
||||
@Parameter(name = ApiConstants.DEPLOYMENT_PLANNER, type = CommandType.STRING, description = "The deployment planner heuristics used to deploy a VM of this offering. If null, value of global config vm.deployment.planner is used")
|
||||
private String deploymentPlanner;
|
||||
|
||||
@Parameter(name = ApiConstants.SERVICE_OFFERING_DETAILS, type = CommandType.MAP, description = "details for planner, used to store specific parameters")
|
||||
private Map<String, String> details;
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////////// Accessors ///////////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
|
@ -148,6 +157,19 @@ public class CreateServiceOfferingCmd extends BaseCmd {
|
|||
return networkRate;
|
||||
}
|
||||
|
||||
public String getDeploymentPlanner() {
|
||||
return deploymentPlanner;
|
||||
}
|
||||
|
||||
public Map<String, String> getDetails() {
|
||||
if (details == null || details.isEmpty()) {
|
||||
return null;
|
||||
}
|
||||
|
||||
Collection<String> paramsCollection = details.values();
|
||||
Map<String, String> params = (Map<String, String>)(paramsCollection.toArray())[0];
|
||||
return params;
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////// API Implementation///////////////////
|
||||
|
|
|
|||
|
|
@ -0,0 +1,131 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.api.command.admin.systemvm;
|
||||
|
||||
import com.cloud.event.EventTypes;
|
||||
import com.cloud.exception.*;
|
||||
import org.apache.cloudstack.api.*;
|
||||
import org.apache.cloudstack.api.command.user.vm.UpgradeVMCmd;
|
||||
import org.apache.cloudstack.api.response.ServiceOfferingResponse;
|
||||
import org.apache.cloudstack.api.response.SystemVmResponse;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.offering.ServiceOffering;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.user.UserContext;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
|
||||
@APICommand(name = "scaleSystemVm", responseObject=SystemVmResponse.class, description="Scale the service offering for a system vm (console proxy or secondary storage). " +
|
||||
"The system vm must be in a \"Stopped\" state for " +
|
||||
"this command to take effect.")
|
||||
public class ScaleSystemVMCmd extends BaseAsyncCmd {
|
||||
public static final Logger s_logger = Logger.getLogger(UpgradeVMCmd.class.getName());
|
||||
private static final String s_name = "changeserviceforsystemvmresponse";
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
//////////////// API parameters /////////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
||||
@Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType=SystemVmResponse.class,
|
||||
required=true, description="The ID of the system vm")
|
||||
private Long id;
|
||||
|
||||
@Parameter(name=ApiConstants.SERVICE_OFFERING_ID, type=CommandType.UUID, entityType=ServiceOfferingResponse.class,
|
||||
required=true, description="the service offering ID to apply to the system vm")
|
||||
private Long serviceOfferingId;
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////////// Accessors ///////////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
||||
public Long getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
public Long getServiceOfferingId() {
|
||||
return serviceOfferingId;
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////// API Implementation///////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
||||
@Override
|
||||
public String getCommandName() {
|
||||
return s_name;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getEntityOwnerId() {
|
||||
Account account = UserContext.current().getCaller();
|
||||
if (account != null) {
|
||||
return account.getId();
|
||||
}
|
||||
|
||||
return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked
|
||||
}
|
||||
|
||||
@Override
|
||||
public void execute(){
|
||||
UserContext.current().setEventDetails("SystemVm Id: "+getId());
|
||||
|
||||
ServiceOffering serviceOffering = _configService.getServiceOffering(serviceOfferingId);
|
||||
if (serviceOffering == null) {
|
||||
throw new InvalidParameterValueException("Unable to find service offering: " + serviceOfferingId);
|
||||
}
|
||||
|
||||
VirtualMachine result = null;
|
||||
try {
|
||||
result = _mgr.upgradeSystemVM(this);
|
||||
} catch (ResourceUnavailableException ex) {
|
||||
s_logger.warn("Exception: ", ex);
|
||||
throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage());
|
||||
} catch (ConcurrentOperationException ex) {
|
||||
s_logger.warn("Exception: ", ex);
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
|
||||
} catch (ManagementServerException ex) {
|
||||
s_logger.warn("Exception: ", ex);
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
|
||||
} catch (VirtualMachineMigrationException ex) {
|
||||
s_logger.warn("Exception: ", ex);
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
|
||||
}
|
||||
if (result != null) {
|
||||
SystemVmResponse response = _responseGenerator.createSystemVmResponse(result);
|
||||
response.setResponseName(getCommandName());
|
||||
this.setResponseObject(response);
|
||||
} else {
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to scale system vm");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getEventType() {
|
||||
VirtualMachine.Type type = _mgr.findSystemVMTypeById(getId());
|
||||
if(type == VirtualMachine.Type.ConsoleProxy){
|
||||
return EventTypes.EVENT_PROXY_SCALE;
|
||||
}
|
||||
else{
|
||||
return EventTypes.EVENT_SSVM_SCALE;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getEventDescription() {
|
||||
return "scaling system vm: " + getId() + " to service offering: " + getServiceOfferingId();
|
||||
}
|
||||
}
|
||||
|
|
@ -16,6 +16,7 @@
|
|||
// under the License.
|
||||
package org.apache.cloudstack.api.command.user.vm;
|
||||
|
||||
import com.cloud.event.EventTypes;
|
||||
import com.cloud.exception.*;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.user.UserContext;
|
||||
|
|
@ -26,9 +27,11 @@ import org.apache.cloudstack.api.response.SuccessResponse;
|
|||
import org.apache.cloudstack.api.response.UserVmResponse;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
|
||||
@APICommand(name = "scaleVirtualMachine", description="Scales the virtual machine to a new service offering.", responseObject=SuccessResponse.class)
|
||||
public class ScaleVMCmd extends BaseCmd {
|
||||
public class ScaleVMCmd extends BaseAsyncCmd {
|
||||
public static final Logger s_logger = Logger.getLogger(ScaleVMCmd.class.getName());
|
||||
private static final String s_name = "scalevirtualmachineresponse";
|
||||
|
||||
|
|
@ -84,7 +87,7 @@ public class ScaleVMCmd extends BaseCmd {
|
|||
@Override
|
||||
public void execute(){
|
||||
//UserContext.current().setEventDetails("Vm Id: "+getId());
|
||||
boolean result;
|
||||
UserVm result;
|
||||
try {
|
||||
result = _userVmService.upgradeVirtualMachine(this);
|
||||
} catch (ResourceUnavailableException ex) {
|
||||
|
|
@ -100,11 +103,23 @@ public class ScaleVMCmd extends BaseCmd {
|
|||
s_logger.warn("Exception: ", ex);
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
|
||||
}
|
||||
if (result){
|
||||
SuccessResponse response = new SuccessResponse(getCommandName());
|
||||
if (result != null){
|
||||
List<UserVmResponse> responseList = _responseGenerator.createUserVmResponse("virtualmachine", result);
|
||||
UserVmResponse response = responseList.get(0);
|
||||
response.setResponseName(getCommandName());
|
||||
this.setResponseObject(response);
|
||||
} else {
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to scale vm");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getEventType() {
|
||||
return EventTypes.EVENT_VM_SCALE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getEventDescription() {
|
||||
return "scaling volume: " + getId() + " to service offering: " + getServiceOfferingId();
|
||||
}
|
||||
}
|
||||
|
|
@ -18,6 +18,8 @@ package org.apache.cloudstack.api.response;
|
|||
|
||||
import java.util.Date;
|
||||
|
||||
import javax.persistence.Column;
|
||||
|
||||
import org.apache.cloudstack.api.ApiConstants;
|
||||
import org.apache.cloudstack.api.BaseResponse;
|
||||
import org.apache.cloudstack.api.EntityReference;
|
||||
|
|
@ -82,6 +84,8 @@ public class ServiceOfferingResponse extends BaseResponse {
|
|||
@SerializedName(ApiConstants.NETWORKRATE) @Param(description="data transfer rate in megabits per second allowed.")
|
||||
private Integer networkRate;
|
||||
|
||||
@SerializedName(ApiConstants.DEPLOYMENT_PLANNER) @Param(description="deployment strategy used to deploy VM.")
|
||||
private String deploymentPlanner;
|
||||
|
||||
public String getId() {
|
||||
return id;
|
||||
|
|
@ -225,4 +229,12 @@ public class ServiceOfferingResponse extends BaseResponse {
|
|||
public void setNetworkRate(Integer networkRate) {
|
||||
this.networkRate = networkRate;
|
||||
}
|
||||
|
||||
public String getDeploymentPlanner() {
|
||||
return deploymentPlanner;
|
||||
}
|
||||
|
||||
public void setDeploymentPlanner(String deploymentPlanner) {
|
||||
this.deploymentPlanner = deploymentPlanner;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -24,11 +24,18 @@ import org.apache.cloudstack.api.ResponseGenerator;
|
|||
import org.apache.cloudstack.api.ServerApiException;
|
||||
import org.apache.cloudstack.api.command.user.vm.ScaleVMCmd;
|
||||
|
||||
import org.apache.cloudstack.api.response.SwiftResponse;
|
||||
import org.apache.cloudstack.api.response.UserVmResponse;
|
||||
import org.junit.Before;
|
||||
import org.junit.Rule;
|
||||
import org.junit.Test;
|
||||
import org.junit.rules.ExpectedException;
|
||||
import org.mockito.Mockito;
|
||||
import static org.mockito.Matchers.anyInt;
|
||||
|
||||
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
|
||||
public class ScaleVMCmdTest extends TestCase{
|
||||
|
||||
|
|
@ -58,19 +65,34 @@ public class ScaleVMCmdTest extends TestCase{
|
|||
public void testCreateSuccess() {
|
||||
|
||||
UserVmService userVmService = Mockito.mock(UserVmService.class);
|
||||
UserVm userVm = Mockito.mock(UserVm.class);
|
||||
|
||||
|
||||
try {
|
||||
Mockito.when(
|
||||
userVmService.upgradeVirtualMachine(scaleVMCmd))
|
||||
.thenReturn(true);
|
||||
.thenReturn(userVm);
|
||||
}catch (Exception e){
|
||||
Assert.fail("Received exception when success expected " +e.getMessage());
|
||||
}
|
||||
|
||||
scaleVMCmd._userVmService = userVmService;
|
||||
responseGenerator = Mockito.mock(ResponseGenerator.class);
|
||||
|
||||
ResponseGenerator responseGenerator = Mockito.mock(ResponseGenerator.class);
|
||||
scaleVMCmd._responseGenerator = responseGenerator;
|
||||
|
||||
UserVmResponse userVmResponse = Mockito.mock(UserVmResponse.class);
|
||||
//List<UserVmResponse> list = Mockito.mock(UserVmResponse.class);
|
||||
//list.add(userVmResponse);
|
||||
//LinkedList<UserVmResponse> mockedList = Mockito.mock(LinkedList.class);
|
||||
//Mockito.when(mockedList.get(0)).thenReturn(userVmResponse);
|
||||
|
||||
List<UserVmResponse> list = new LinkedList<UserVmResponse>();
|
||||
list.add(userVmResponse);
|
||||
|
||||
Mockito.when(responseGenerator.createUserVmResponse("virtualmachine", userVm)).thenReturn(
|
||||
list);
|
||||
|
||||
scaleVMCmd._userVmService = userVmService;
|
||||
|
||||
scaleVMCmd.execute();
|
||||
|
||||
}
|
||||
|
|
@ -83,7 +105,7 @@ public class ScaleVMCmdTest extends TestCase{
|
|||
try {
|
||||
Mockito.when(
|
||||
userVmService.upgradeVirtualMachine(scaleVMCmd))
|
||||
.thenReturn(false);
|
||||
.thenReturn(null);
|
||||
}catch (Exception e){
|
||||
Assert.fail("Received exception when success expected " +e.getMessage());
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,15 +19,14 @@ package com.cloud.bridge.persist.dao;
|
|||
import javax.ejb.Local;
|
||||
|
||||
import org.apache.log4j.Logger;
|
||||
import org.jasypt.encryption.pbe.StandardPBEStringEncryptor;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import com.cloud.bridge.model.CloudStackUserVO;
|
||||
import com.cloud.bridge.util.EncryptionSecretKeyCheckerUtil;
|
||||
import com.cloud.utils.db.GenericDaoBase;
|
||||
import com.cloud.utils.db.SearchBuilder;
|
||||
import com.cloud.utils.db.SearchCriteria;
|
||||
import com.cloud.utils.db.Transaction;
|
||||
import com.cloud.utils.crypt.DBEncryptionUtil;
|
||||
|
||||
@Component
|
||||
@Local(value={CloudStackUserDao.class})
|
||||
|
|
@ -51,13 +50,8 @@ public class CloudStackUserDaoImpl extends GenericDaoBase<CloudStackUserVO, Stri
|
|||
sc.setParameters("apiKey", accessKey);
|
||||
user = findOneBy(sc);
|
||||
if ( user != null && user.getSecretKey() != null) {
|
||||
// if the cloud db is encrypted, decrypt the secret_key returned by cloud db before signature generation
|
||||
if( EncryptionSecretKeyCheckerUtil.useEncryption() ) {
|
||||
StandardPBEStringEncryptor encryptor = EncryptionSecretKeyCheckerUtil.getEncryptor();
|
||||
cloudSecretKey = encryptor.decrypt( user.getSecretKey() );
|
||||
} else {
|
||||
cloudSecretKey = user.getSecretKey();
|
||||
}
|
||||
// User secret key could be encrypted
|
||||
cloudSecretKey = DBEncryptionUtil.decrypt(user.getSecretKey());
|
||||
}
|
||||
return cloudSecretKey;
|
||||
} finally {
|
||||
|
|
|
|||
|
|
@ -131,6 +131,11 @@
|
|||
<artifactId>cloud-plugin-planner-user-concentrated-pod</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
<artifactId>cloud-plugin-planner-implicit-dedication</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
<artifactId>cloud-plugin-host-allocator-random</artifactId>
|
||||
|
|
|
|||
|
|
@ -370,7 +370,7 @@
|
|||
<bean id="vpnUserDaoImpl" class="com.cloud.network.dao.VpnUserDaoImpl" />
|
||||
<bean id="applicationLbRuleDaoImpl" class="org.apache.cloudstack.lb.dao.ApplicationLoadBalancerRuleDaoImpl" />
|
||||
<bean id="networkOfferingDetailsDaoImpl" class="com.cloud.offerings.dao.NetworkOfferingDetailsDaoImpl" />
|
||||
|
||||
<bean id="serviceOfferingDetailsDaoImpl" class="com.cloud.service.dao.ServiceOfferingDetailsDaoImpl"/>
|
||||
|
||||
<!--
|
||||
Checkers
|
||||
|
|
@ -540,15 +540,15 @@
|
|||
Deployment planners
|
||||
-->
|
||||
<bean id="UserDispersingPlanner" class="com.cloud.deploy.UserDispersingPlanner">
|
||||
<property name="name" value="UserDispersing"/>
|
||||
<property name="StoragePoolAllocators" value="#{storagePoolAllocators.Adapters}" />
|
||||
<property name="HostAllocators" value="#{hostAllocators.Adapters}" />
|
||||
<property name="name" value="UserDispersingPlanner"/>
|
||||
</bean>
|
||||
|
||||
<bean id="UserConcentratedPodPlanner" class="com.cloud.deploy.UserConcentratedPodPlanner">
|
||||
<property name="name" value="UserConcentratedPod"/>
|
||||
<property name="StoragePoolAllocators" value="#{storagePoolAllocators.Adapters}" />
|
||||
<property name="HostAllocators" value="#{hostAllocators.Adapters}" />
|
||||
<property name="name" value="UserConcentratedPodPlanner"/>
|
||||
</bean>
|
||||
|
||||
<bean id="ImplicitDedicationPlanner" class="com.cloud.deploy.ImplicitDedicationPlanner">
|
||||
<property name="name" value="ImplicitDedicationPlanner"/>
|
||||
</bean>
|
||||
|
||||
<bean id="clusterBasedAgentLoadBalancerPlanner" class="com.cloud.cluster.agentlb.ClusterBasedAgentLoadBalancerPlanner">
|
||||
|
|
@ -605,10 +605,6 @@
|
|||
<property name="name" value="OvmGuru"/>
|
||||
</bean>
|
||||
|
||||
<bean id="HypervisorPlannerSelector" class="com.cloud.deploy.HypervisorVmPlannerSelector">
|
||||
<property name="name" value="HypervisorPlannerSelector"/>
|
||||
</bean>
|
||||
|
||||
<!--
|
||||
Managers
|
||||
-->
|
||||
|
|
@ -623,6 +619,7 @@
|
|||
<property name="UserPasswordEncoders" value="#{userPasswordEncoders.Adapters}" />
|
||||
<property name="HostAllocators" value="#{hostAllocators.Adapters}" />
|
||||
<property name="AffinityGroupProcessors" value="#{affinityProcessors.Adapters}" />
|
||||
<property name="Planners" value="#{deploymentPlanners.Adapters}" />
|
||||
</bean>
|
||||
|
||||
<bean id="storageManagerImpl" class="com.cloud.storage.StorageManagerImpl">
|
||||
|
|
@ -630,9 +627,7 @@
|
|||
</bean>
|
||||
|
||||
<bean id="FirstFitPlanner" class="com.cloud.deploy.FirstFitPlanner">
|
||||
<property name="name" value="First Fit"/>
|
||||
<property name="StoragePoolAllocators" value="#{storagePoolAllocators.Adapters}" />
|
||||
<property name="HostAllocators" value="#{hostAllocators.Adapters}" />
|
||||
<property name="name" value="FirstFitPlanner"/>
|
||||
</bean>
|
||||
|
||||
<bean id="resourceManagerImpl" class="com.cloud.resource.ResourceManagerImpl" >
|
||||
|
|
@ -833,17 +828,13 @@
|
|||
</bean>
|
||||
|
||||
<bean id="BareMetalPlanner" class="com.cloud.baremetal.manager.BareMetalPlanner">
|
||||
<property name="name" value="BareMetal Fit"/>
|
||||
<property name="name" value="BareMetalPlanner"/>
|
||||
</bean>
|
||||
|
||||
<bean id="BaremetalGuru" class="com.cloud.baremetal.manager.BareMetalGuru">
|
||||
<property name="name" value="BaremetalGuru"/>
|
||||
</bean>
|
||||
|
||||
<bean id="BaremetalPlannerSelector" class="com.cloud.baremetal.manager.BaremetalPlannerSelector">
|
||||
<property name="name" value="BaremetalPlannerSelector"/>
|
||||
</bean>
|
||||
|
||||
<bean id="BaremetalManager" class="com.cloud.baremetal.manager.BaremetalManagerImpl"/>
|
||||
<bean id="BaremetalDhcpManager" class="com.cloud.baremetal.networkservice.BaremetalDhcpManagerImpl"/>
|
||||
<bean id="BaremetalKickStartPxeService" class="com.cloud.baremetal.networkservice.BaremetalKickStartServiceImpl"/>
|
||||
|
|
@ -859,6 +850,8 @@
|
|||
<bean id="DeploymentPlanningManager" class="com.cloud.deploy.DeploymentPlanningManagerImpl">
|
||||
<property name="Planners" value="#{deploymentPlanners.Adapters}" />
|
||||
<property name="AffinityGroupProcessors" value="#{affinityProcessors.Adapters}" />
|
||||
<property name="StoragePoolAllocators" value="#{storagePoolAllocators.Adapters}" />
|
||||
<property name="HostAllocators" value="#{hostAllocators.Adapters}" />
|
||||
</bean>
|
||||
|
||||
<bean id="AffinityGroupJoinDaoImpl" class="com.cloud.api.query.dao.AffinityGroupJoinDaoImpl">
|
||||
|
|
@ -868,4 +861,7 @@
|
|||
<bean id="AffinityGroupVMMapDaoImpl" class="org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDaoImpl">
|
||||
</bean>
|
||||
|
||||
<bean id="PlannerHostReservationDaoImpl" class="com.cloud.deploy.dao.PlannerHostReservationDaoImpl">
|
||||
</bean>
|
||||
|
||||
</beans>
|
||||
|
|
|
|||
|
|
@ -205,6 +205,7 @@ destroySystemVm=1
|
|||
listSystemVms=3
|
||||
migrateSystemVm=1
|
||||
changeServiceForSystemVm=1
|
||||
scaleSystemVm=1
|
||||
|
||||
#### configuration commands
|
||||
updateConfiguration=1
|
||||
|
|
@ -212,6 +213,7 @@ listConfigurations=1
|
|||
ldapConfig=1
|
||||
ldapRemove=1
|
||||
listCapabilities=15
|
||||
listDeploymentPlanners=1
|
||||
|
||||
#### pod commands
|
||||
createPod=1
|
||||
|
|
@ -261,6 +263,7 @@ listHosts=3
|
|||
findHostsForMigration=1
|
||||
addSecondaryStorage=1
|
||||
updateHostPassword=1
|
||||
releaseHostReservation=1
|
||||
|
||||
#### volume commands
|
||||
attachVolume=15
|
||||
|
|
|
|||
|
|
@ -156,6 +156,7 @@
|
|||
<ref bean="FirstFitPlanner" />
|
||||
<ref bean="UserDispersingPlanner" />
|
||||
<ref bean="UserConcentratedPodPlanner" />
|
||||
<ref bean="ImplicitDedicationPlanner" />
|
||||
<!--
|
||||
<ref bean="BareMetalPlanner" />
|
||||
-->
|
||||
|
|
|
|||
|
|
@ -17,11 +17,13 @@
|
|||
package com.cloud.agent.api;
|
||||
|
||||
import com.cloud.agent.api.to.NicTO;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
|
||||
public class PlugNicCommand extends Command {
|
||||
|
||||
NicTO nic;
|
||||
String instanceName;
|
||||
VirtualMachine.Type vmType;
|
||||
|
||||
public NicTO getNic() {
|
||||
return nic;
|
||||
|
|
@ -35,12 +37,17 @@ public class PlugNicCommand extends Command {
|
|||
protected PlugNicCommand() {
|
||||
}
|
||||
|
||||
public PlugNicCommand(NicTO nic, String instanceName) {
|
||||
public PlugNicCommand(NicTO nic, String instanceName, VirtualMachine.Type vmtype) {
|
||||
this.nic = nic;
|
||||
this.instanceName = instanceName;
|
||||
this.vmType = vmtype;
|
||||
}
|
||||
|
||||
public String getVmName() {
|
||||
return instanceName;
|
||||
}
|
||||
|
||||
public VirtualMachine.Type getVMType() {
|
||||
return vmType;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -26,78 +26,91 @@
|
|||
choice from the EIP pool of your account. Later if required you can reassign the IP address to a
|
||||
different VM. This feature is extremely helpful during VM failure. Instead of replacing the VM
|
||||
which is down, the IP address can be reassigned to a new VM in your account. </para>
|
||||
<para>Similar to the public IP address, Elastic IP addresses are mapped to their associated
|
||||
private IP addresses by using StaticNAT. The EIP service is equipped with StaticNAT (1:1)
|
||||
service in an EIP-enabled basic zone. The default network offering,
|
||||
DefaultSharedNetscalerEIPandELBNetworkOffering, provides your network with EIP and ELB network
|
||||
services if a NetScaler device is deployed in your zone. Consider the following illustration for
|
||||
more details.</para>
|
||||
<mediaobject>
|
||||
<imageobject>
|
||||
<imagedata fileref="./images/eip-ns-basiczone.png"/>
|
||||
</imageobject>
|
||||
<textobject>
|
||||
<phrase>eip-ns-basiczone.png: Elastic IP in a NetScaler-enabled Basic Zone.</phrase>
|
||||
</textobject>
|
||||
</mediaobject>
|
||||
<para>In the illustration, a NetScaler appliance is the default entry or exit point for the
|
||||
&PRODUCT; instances, and firewall is the default entry or exit point for the rest of the data
|
||||
center. Netscaler provides LB services and staticNAT service to the guest networks. The guest
|
||||
traffic in the pods and the Management Server are on different subnets / VLANs. The policy-based
|
||||
routing in the data center core switch sends the public traffic through the NetScaler, whereas
|
||||
the rest of the data center goes through the firewall. </para>
|
||||
<para>The EIP work flow is as follows:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>When a user VM is deployed, a public IP is automatically acquired from the pool of
|
||||
public IPs configured in the zone. This IP is owned by the VM's account.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Each VM will have its own private IP. When the user VM starts, Static NAT is provisioned
|
||||
on the NetScaler device by using the Inbound Network Address Translation (INAT) and Reverse
|
||||
NAT (RNAT) rules between the public IP and the private IP.</para>
|
||||
<note>
|
||||
<para>Inbound NAT (INAT) is a type of NAT supported by NetScaler, in which the destination
|
||||
IP address is replaced in the packets from the public network, such as the Internet, with
|
||||
the private IP address of a VM in the private network. Reverse NAT (RNAT) is a type of NAT
|
||||
supported by NetScaler, in which the source IP address is replaced in the packets
|
||||
generated by a VM in the private network with the public IP address.</para>
|
||||
</note>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>This default public IP will be released in two cases:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>When the VM is stopped. When the VM starts, it again receives a new public IP, not
|
||||
necessarily the same one allocated initially, from the pool of Public IPs.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>The user acquires a public IP (Elastic IP). This public IP is associated with the
|
||||
account, but will not be mapped to any private IP. However, the user can enable Static
|
||||
NAT to associate this IP to the private IP of a VM in the account. The Static NAT rule
|
||||
for the public IP can be disabled at any time. When Static NAT is disabled, a new public
|
||||
IP is allocated from the pool, which is not necessarily be the same one allocated
|
||||
initially.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>For the deployments where public IPs are limited resources, you have the flexibility to
|
||||
choose not to allocate a public IP by default. You can use the Associate Public IP option to
|
||||
turn on or off the automatic public IP assignment in the EIP-enabled Basic zones. If you turn
|
||||
off the automatic public IP assignment while creating a network offering, only a private IP is
|
||||
assigned to a VM when the VM is deployed with that network offering. Later, the user can acquire
|
||||
an IP for the VM and enable static NAT.</para>
|
||||
<para condition="admin">For more information on the Associate Public IP option, see <xref
|
||||
linkend="creating-network-offerings"/>.</para>
|
||||
<para condition="install">For more information on the Associate Public IP option, see the
|
||||
Administration Guide.</para>
|
||||
<note>
|
||||
<para>The Associate Public IP feature is designed only for use with user VMs. The System VMs
|
||||
continue to get both public IP and private by default, irrespective of the network offering
|
||||
configuration.</para>
|
||||
</note>
|
||||
<para>New deployments which use the default shared network offering with EIP and ELB services to
|
||||
create a shared network in the Basic zone will continue allocating public IPs to each user
|
||||
VM.</para>
|
||||
<section id="about-eip">
|
||||
<title>Elastic IPs in Basic Zone</title>
|
||||
<para>Similar to the public IP address, Elastic IP addresses are mapped to their associated
|
||||
private IP addresses by using StaticNAT. The EIP service is equipped with StaticNAT (1:1)
|
||||
service in an EIP-enabled basic zone. The default network offering,
|
||||
DefaultSharedNetscalerEIPandELBNetworkOffering, provides your network with EIP and ELB network
|
||||
services if a NetScaler device is deployed in your zone. Consider the following illustration
|
||||
for more details.</para>
|
||||
<mediaobject>
|
||||
<imageobject>
|
||||
<imagedata fileref="./images/eip-ns-basiczone.png"/>
|
||||
</imageobject>
|
||||
<textobject>
|
||||
<phrase>eip-ns-basiczone.png: Elastic IP in a NetScaler-enabled Basic Zone.</phrase>
|
||||
</textobject>
|
||||
</mediaobject>
|
||||
<para>In the illustration, a NetScaler appliance is the default entry or exit point for the
|
||||
&PRODUCT; instances, and firewall is the default entry or exit point for the rest of the data
|
||||
center. Netscaler provides LB services and staticNAT service to the guest networks. The guest
|
||||
traffic in the pods and the Management Server are on different subnets / VLANs. The
|
||||
policy-based routing in the data center core switch sends the public traffic through the
|
||||
NetScaler, whereas the rest of the data center goes through the firewall. </para>
|
||||
<para>The EIP work flow is as follows:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>When a user VM is deployed, a public IP is automatically acquired from the pool of
|
||||
public IPs configured in the zone. This IP is owned by the VM's account.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Each VM will have its own private IP. When the user VM starts, Static NAT is
|
||||
provisioned on the NetScaler device by using the Inbound Network Address Translation
|
||||
(INAT) and Reverse NAT (RNAT) rules between the public IP and the private IP.</para>
|
||||
<note>
|
||||
<para>Inbound NAT (INAT) is a type of NAT supported by NetScaler, in which the destination
|
||||
IP address is replaced in the packets from the public network, such as the Internet,
|
||||
with the private IP address of a VM in the private network. Reverse NAT (RNAT) is a type
|
||||
of NAT supported by NetScaler, in which the source IP address is replaced in the packets
|
||||
generated by a VM in the private network with the public IP address.</para>
|
||||
</note>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>This default public IP will be released in two cases:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>When the VM is stopped. When the VM starts, it again receives a new public IP, not
|
||||
necessarily the same one allocated initially, from the pool of Public IPs.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>The user acquires a public IP (Elastic IP). This public IP is associated with the
|
||||
account, but will not be mapped to any private IP. However, the user can enable Static
|
||||
NAT to associate this IP to the private IP of a VM in the account. The Static NAT rule
|
||||
for the public IP can be disabled at any time. When Static NAT is disabled, a new
|
||||
public IP is allocated from the pool, which is not necessarily be the same one
|
||||
allocated initially.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>For the deployments where public IPs are limited resources, you have the flexibility to
|
||||
choose not to allocate a public IP by default. You can use the Associate Public IP option to
|
||||
turn on or off the automatic public IP assignment in the EIP-enabled Basic zones. If you turn
|
||||
off the automatic public IP assignment while creating a network offering, only a private IP is
|
||||
assigned to a VM when the VM is deployed with that network offering. Later, the user can
|
||||
acquire an IP for the VM and enable static NAT.</para>
|
||||
<para condition="admin">For more information on the Associate Public IP option, see <xref
|
||||
linkend="creating-network-offerings"/>.</para>
|
||||
<para condition="install">For more information on the Associate Public IP option, see the
|
||||
Administration Guide.</para>
|
||||
<note>
|
||||
<para>The Associate Public IP feature is designed only for use with user VMs. The System VMs
|
||||
continue to get both public IP and private by default, irrespective of the network offering
|
||||
configuration.</para>
|
||||
</note>
|
||||
<para>New deployments which use the default shared network offering with EIP and ELB services to
|
||||
create a shared network in the Basic zone will continue allocating public IPs to each user
|
||||
VM.</para>
|
||||
</section>
|
||||
<section id="portable-ip">
|
||||
<title>About Portable IP</title>
|
||||
<para>Portable IPs in &PRODUCT; are nothing but elastic IPs that can be transferred across
|
||||
geographically separated zones. As an administrator, you can provision a pool of portable IPs
|
||||
at region level and are available for user consumption. The users can acquire portable IPs if
|
||||
admin has provisioned portable public IPs at the region level they are part of. These IPs can
|
||||
be use for any service within an advanced zone. You can also use portable IPs for EIP service
|
||||
in basic zones. Additionally, a portable IP can be transferred from one network to another
|
||||
network.</para>
|
||||
</section>
|
||||
</section>
|
||||
|
|
|
|||
|
|
@ -174,5 +174,10 @@ public class ServiceOffering21VO extends DiskOffering21VO implements ServiceOffe
|
|||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getDeploymentPlanner() {
|
||||
// TODO Auto-generated method stub
|
||||
return null;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,73 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.service;
|
||||
|
||||
import org.apache.cloudstack.api.InternalIdentity;
|
||||
|
||||
import javax.persistence.Column;
|
||||
import javax.persistence.Entity;
|
||||
import javax.persistence.GeneratedValue;
|
||||
import javax.persistence.GenerationType;
|
||||
import javax.persistence.Id;
|
||||
import javax.persistence.Table;
|
||||
|
||||
@Entity
|
||||
@Table(name="service_offering_details")
|
||||
public class ServiceOfferingDetailsVO implements InternalIdentity {
|
||||
@Id
|
||||
@GeneratedValue(strategy=GenerationType.IDENTITY)
|
||||
@Column(name="id")
|
||||
private long id;
|
||||
|
||||
@Column(name="service_offering_id")
|
||||
private long serviceOfferingId;
|
||||
|
||||
@Column(name="name")
|
||||
private String name;
|
||||
|
||||
@Column(name="value")
|
||||
private String value;
|
||||
|
||||
protected ServiceOfferingDetailsVO() {
|
||||
}
|
||||
|
||||
public ServiceOfferingDetailsVO(long serviceOfferingId, String name, String value) {
|
||||
this.serviceOfferingId = serviceOfferingId;
|
||||
this.name = name;
|
||||
this.value = value;
|
||||
}
|
||||
|
||||
public long getServiceOfferingId() {
|
||||
return serviceOfferingId;
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public String getValue() {
|
||||
return value;
|
||||
}
|
||||
|
||||
public void setValue(String value) {
|
||||
this.value = value;
|
||||
}
|
||||
|
||||
public long getId() {
|
||||
return id;
|
||||
}
|
||||
}
|
||||
|
|
@ -16,6 +16,8 @@
|
|||
// under the License.
|
||||
package com.cloud.service;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
import javax.persistence.Column;
|
||||
import javax.persistence.DiscriminatorValue;
|
||||
import javax.persistence.Entity;
|
||||
|
|
@ -68,6 +70,15 @@ public class ServiceOfferingVO extends DiskOfferingVO implements ServiceOffering
|
|||
@Column(name="sort_key")
|
||||
int sortKey;
|
||||
|
||||
@Column(name = "deployment_planner")
|
||||
private String deploymentPlanner = null;
|
||||
|
||||
// This is a delayed load value. If the value is null,
|
||||
// then this field has not been loaded yet.
|
||||
// Call service offering dao to load it.
|
||||
@Transient
|
||||
Map<String, String> details;
|
||||
|
||||
protected ServiceOfferingVO() {
|
||||
super();
|
||||
}
|
||||
|
|
@ -104,6 +115,15 @@ public class ServiceOfferingVO extends DiskOfferingVO implements ServiceOffering
|
|||
this.hostTag = hostTag;
|
||||
}
|
||||
|
||||
public ServiceOfferingVO(String name, int cpu, int ramSize, int speed, Integer rateMbps, Integer multicastRateMbps,
|
||||
boolean offerHA, boolean limitResourceUse, boolean volatileVm, String displayText, boolean useLocalStorage,
|
||||
boolean recreatable, String tags, boolean systemUse, VirtualMachine.Type vm_type, Long domainId,
|
||||
String hostTag, String deploymentPlanner) {
|
||||
this(name, cpu, ramSize, speed, rateMbps, multicastRateMbps, offerHA, limitResourceUse, volatileVm,
|
||||
displayText, useLocalStorage, recreatable, tags, systemUse, vm_type, domainId, hostTag);
|
||||
this.deploymentPlanner = deploymentPlanner;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean getOfferHA() {
|
||||
return offerHA;
|
||||
|
|
@ -208,4 +228,28 @@ public class ServiceOfferingVO extends DiskOfferingVO implements ServiceOffering
|
|||
return volatileVm;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getDeploymentPlanner() {
|
||||
return deploymentPlanner;
|
||||
}
|
||||
|
||||
public Map<String, String> getDetails() {
|
||||
return details;
|
||||
}
|
||||
|
||||
public String getDetail(String name) {
|
||||
assert (details != null) : "Did you forget to load the details?";
|
||||
|
||||
return details != null ? details.get(name) : null;
|
||||
}
|
||||
|
||||
public void setDetail(String name, String value) {
|
||||
assert (details != null) : "Did you forget to load the details?";
|
||||
|
||||
details.put(name, value);
|
||||
}
|
||||
|
||||
public void setDetails(Map<String, String> details) {
|
||||
this.details = details;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -31,4 +31,6 @@ public interface ServiceOfferingDao extends GenericDao<ServiceOfferingVO, Long>
|
|||
List<ServiceOfferingVO> findServiceOfferingByDomainId(Long domainId);
|
||||
List<ServiceOfferingVO> findSystemOffering(Long domainId, Boolean isSystem, String vm_type);
|
||||
ServiceOfferingVO persistDeafultServiceOffering(ServiceOfferingVO offering);
|
||||
void loadDetails(ServiceOfferingVO serviceOffering);
|
||||
void saveDetails(ServiceOfferingVO serviceOffering);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,15 +18,16 @@ package com.cloud.service.dao;
|
|||
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.ejb.Local;
|
||||
import javax.inject.Inject;
|
||||
import javax.persistence.EntityExistsException;
|
||||
|
||||
import org.apache.log4j.Logger;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import com.cloud.service.ServiceOfferingVO;
|
||||
import com.cloud.storage.DiskOfferingVO;
|
||||
import com.cloud.utils.db.DB;
|
||||
import com.cloud.utils.db.GenericDaoBase;
|
||||
import com.cloud.utils.db.SearchBuilder;
|
||||
|
|
@ -37,6 +38,8 @@ import com.cloud.utils.db.SearchCriteria;
|
|||
public class ServiceOfferingDaoImpl extends GenericDaoBase<ServiceOfferingVO, Long> implements ServiceOfferingDao {
|
||||
protected static final Logger s_logger = Logger.getLogger(ServiceOfferingDaoImpl.class);
|
||||
|
||||
@Inject protected ServiceOfferingDetailsDao detailsDao;
|
||||
|
||||
protected final SearchBuilder<ServiceOfferingVO> UniqueNameSearch;
|
||||
protected final SearchBuilder<ServiceOfferingVO> ServiceOfferingsByDomainIdSearch;
|
||||
protected final SearchBuilder<ServiceOfferingVO> SystemServiceOffering;
|
||||
|
|
@ -154,4 +157,18 @@ public class ServiceOfferingDaoImpl extends GenericDaoBase<ServiceOfferingVO, Lo
|
|||
|
||||
return update(id, offering);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void loadDetails(ServiceOfferingVO serviceOffering) {
|
||||
Map<String, String> details = detailsDao.findDetails(serviceOffering.getId());
|
||||
serviceOffering.setDetails(details);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void saveDetails(ServiceOfferingVO serviceOffering) {
|
||||
Map<String, String> details = serviceOffering.getDetails();
|
||||
if (details != null) {
|
||||
detailsDao.persist(serviceOffering.getId(), details);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -14,26 +14,16 @@
|
|||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.baremetal.manager;
|
||||
package com.cloud.service.dao;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
import javax.ejb.Local;
|
||||
import javax.naming.ConfigurationException;
|
||||
import com.cloud.service.ServiceOfferingDetailsVO;
|
||||
import com.cloud.utils.db.GenericDao;
|
||||
|
||||
import com.cloud.deploy.AbstractDeployPlannerSelector;
|
||||
import com.cloud.deploy.DeployPlannerSelector;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.vm.UserVmVO;
|
||||
@Local(value = {DeployPlannerSelector.class})
|
||||
public class BaremetalPlannerSelector extends AbstractDeployPlannerSelector{
|
||||
|
||||
@Override
|
||||
public String selectPlanner(UserVmVO vm) {
|
||||
if (vm.getHypervisorType() == HypervisorType.BareMetal) {
|
||||
return "BareMetalPlanner";
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
}
|
||||
public interface ServiceOfferingDetailsDao extends GenericDao<ServiceOfferingDetailsVO, Long> {
|
||||
Map<String, String> findDetails(long serviceOfferingId);
|
||||
void persist(long serviceOfferingId, Map<String, String> details);
|
||||
ServiceOfferingDetailsVO findDetail(long serviceOfferingId, String name);
|
||||
void deleteDetails(long serviceOfferingId);
|
||||
}
|
||||
|
|
@ -0,0 +1,98 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.service.dao;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.ejb.Local;
|
||||
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import com.cloud.service.ServiceOfferingDetailsVO;
|
||||
import com.cloud.utils.db.GenericDaoBase;
|
||||
import com.cloud.utils.db.SearchBuilder;
|
||||
import com.cloud.utils.db.SearchCriteria;
|
||||
import com.cloud.utils.db.Transaction;
|
||||
|
||||
@Component
|
||||
@Local(value=ServiceOfferingDetailsDao.class)
|
||||
public class ServiceOfferingDetailsDaoImpl extends GenericDaoBase<ServiceOfferingDetailsVO, Long>
|
||||
implements ServiceOfferingDetailsDao {
|
||||
protected final SearchBuilder<ServiceOfferingDetailsVO> ServiceOfferingSearch;
|
||||
protected final SearchBuilder<ServiceOfferingDetailsVO> DetailSearch;
|
||||
|
||||
public ServiceOfferingDetailsDaoImpl() {
|
||||
ServiceOfferingSearch = createSearchBuilder();
|
||||
ServiceOfferingSearch.and("serviceOfferingId", ServiceOfferingSearch.entity().getServiceOfferingId(), SearchCriteria.Op.EQ);
|
||||
ServiceOfferingSearch.done();
|
||||
|
||||
DetailSearch = createSearchBuilder();
|
||||
DetailSearch.and("serviceOfferingId", DetailSearch.entity().getServiceOfferingId(), SearchCriteria.Op.EQ);
|
||||
DetailSearch.and("name", DetailSearch.entity().getName(), SearchCriteria.Op.EQ);
|
||||
DetailSearch.done();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ServiceOfferingDetailsVO findDetail(long serviceOfferingId, String name) {
|
||||
SearchCriteria<ServiceOfferingDetailsVO> sc = DetailSearch.create();
|
||||
sc.setParameters("serviceOfferingId", serviceOfferingId);
|
||||
sc.setParameters("name", name);
|
||||
ServiceOfferingDetailsVO detail = findOneIncludingRemovedBy(sc);
|
||||
return detail;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, String> findDetails(long serviceOfferingId) {
|
||||
SearchCriteria<ServiceOfferingDetailsVO> sc = ServiceOfferingSearch.create();
|
||||
sc.setParameters("serviceOfferingId", serviceOfferingId);
|
||||
List<ServiceOfferingDetailsVO> results = search(sc, null);
|
||||
Map<String, String> details = new HashMap<String, String>(results.size());
|
||||
for (ServiceOfferingDetailsVO result : results) {
|
||||
details.put(result.getName(), result.getValue());
|
||||
}
|
||||
|
||||
return details;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void deleteDetails(long serviceOfferingId) {
|
||||
SearchCriteria sc = ServiceOfferingSearch.create();
|
||||
sc.setParameters("serviceOfferingId", serviceOfferingId);
|
||||
List<ServiceOfferingDetailsVO> results = search(sc, null);
|
||||
for (ServiceOfferingDetailsVO result : results) {
|
||||
remove(result.getId());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void persist(long serviceOfferingId, Map<String, String> details) {
|
||||
Transaction txn = Transaction.currentTxn();
|
||||
txn.start();
|
||||
SearchCriteria<ServiceOfferingDetailsVO> sc = ServiceOfferingSearch.create();
|
||||
sc.setParameters("serviceOfferingId", serviceOfferingId);
|
||||
expunge(sc);
|
||||
|
||||
for (Map.Entry<String, String> detail : details.entrySet()) {
|
||||
String value = detail.getValue();
|
||||
ServiceOfferingDetailsVO vo = new ServiceOfferingDetailsVO(serviceOfferingId, detail.getKey(), value);
|
||||
persist(vo);
|
||||
}
|
||||
txn.commit();
|
||||
}
|
||||
}
|
||||
|
|
@ -17,6 +17,10 @@
|
|||
|
||||
package com.cloud.upgrade.dao;
|
||||
|
||||
import com.cloud.deploy.DeploymentPlanner;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.utils.script.Script;
|
||||
import org.apache.log4j.Logger;
|
||||
import java.io.File;
|
||||
import java.sql.Connection;
|
||||
import java.sql.Date;
|
||||
|
|
@ -25,12 +29,7 @@ import java.sql.ResultSet;
|
|||
import java.sql.SQLException;
|
||||
import java.sql.Types;
|
||||
import java.util.UUID;
|
||||
|
||||
import com.cloud.network.vpc.NetworkACL;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.utils.script.Script;
|
||||
|
||||
public class Upgrade410to420 implements DbUpgrade {
|
||||
final static Logger s_logger = Logger.getLogger(Upgrade410to420.class);
|
||||
|
|
@ -70,6 +69,7 @@ public class Upgrade410to420 implements DbUpgrade {
|
|||
updatePrimaryStore(conn);
|
||||
addEgressFwRulesForSRXGuestNw(conn);
|
||||
upgradeEIPNetworkOfferings(conn);
|
||||
updateGlobalDeploymentPlanner(conn);
|
||||
upgradeDefaultVpcOffering(conn);
|
||||
upgradePhysicalNtwksWithInternalLbProvider(conn);
|
||||
updateNetworkACLs(conn);
|
||||
|
|
@ -563,6 +563,53 @@ public class Upgrade410to420 implements DbUpgrade {
|
|||
}
|
||||
}
|
||||
|
||||
private void updateGlobalDeploymentPlanner(Connection conn) {
|
||||
PreparedStatement pstmt = null;
|
||||
ResultSet rs = null;
|
||||
|
||||
try {
|
||||
pstmt = conn
|
||||
.prepareStatement("select value from `cloud`.`configuration` where name = 'vm.allocation.algorithm'");
|
||||
rs = pstmt.executeQuery();
|
||||
while (rs.next()) {
|
||||
String globalValue = rs.getString(1);
|
||||
String plannerName = "FirstFitPlanner";
|
||||
|
||||
if (globalValue != null) {
|
||||
if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.random.toString())) {
|
||||
plannerName = "FirstFitPlanner";
|
||||
} else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.firstfit.toString())) {
|
||||
plannerName = "FirstFitPlanner";
|
||||
} else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.userconcentratedpod_firstfit
|
||||
.toString())) {
|
||||
plannerName = "UserConcentratedPodPlanner";
|
||||
} else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.userconcentratedpod_random
|
||||
.toString())) {
|
||||
plannerName = "UserConcentratedPodPlanner";
|
||||
} else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.userdispersing.toString())) {
|
||||
plannerName = "UserDispersingPlanner";
|
||||
}
|
||||
}
|
||||
// update vm.deployment.planner global config
|
||||
pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` set value=? where name = 'vm.deployment.planner'");
|
||||
pstmt.setString(1, plannerName);
|
||||
pstmt.executeUpdate();
|
||||
}
|
||||
} catch (SQLException e) {
|
||||
throw new CloudRuntimeException("Unable to set vm.deployment.planner global config", e);
|
||||
} finally {
|
||||
try {
|
||||
if (rs != null) {
|
||||
rs.close();
|
||||
}
|
||||
if (pstmt != null) {
|
||||
pstmt.close();
|
||||
}
|
||||
} catch (SQLException e) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private void upgradeDefaultVpcOffering(Connection conn) {
|
||||
PreparedStatement pstmt = null;
|
||||
|
|
@ -596,8 +643,6 @@ public class Upgrade410to420 implements DbUpgrade {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
private void upgradePhysicalNtwksWithInternalLbProvider(Connection conn) {
|
||||
|
||||
PreparedStatement pstmt = null;
|
||||
|
|
@ -644,7 +689,6 @@ public class Upgrade410to420 implements DbUpgrade {
|
|||
} catch (SQLException e) {
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private void addHostDetailsIndex(Connection conn) {
|
||||
|
|
|
|||
|
|
@ -38,14 +38,14 @@ public interface VMInstanceDao extends GenericDao<VMInstanceVO, Long>, StateDao<
|
|||
* @return list of VMInstanceVO running on that host.
|
||||
*/
|
||||
List<VMInstanceVO> listByHostId(long hostId);
|
||||
|
||||
|
||||
/**
|
||||
* List VMs by zone ID
|
||||
* @param zoneId
|
||||
* @return list of VMInstanceVO in the specified zone
|
||||
*/
|
||||
List<VMInstanceVO> listByZoneId(long zoneId);
|
||||
|
||||
|
||||
/**
|
||||
* List VMs by pod ID
|
||||
* @param podId
|
||||
|
|
@ -59,32 +59,32 @@ public interface VMInstanceDao extends GenericDao<VMInstanceVO, Long>, StateDao<
|
|||
* @return list of VMInstanceVO in the specified zone, deployed from the specified template, that are not expunged
|
||||
*/
|
||||
public List<VMInstanceVO> listNonExpungedByZoneAndTemplate(long zoneId, long templateId);
|
||||
|
||||
|
||||
/**
|
||||
* Find vm instance with names like.
|
||||
*
|
||||
*
|
||||
* @param name name that fits SQL like.
|
||||
* @return list of VMInstanceVO
|
||||
*/
|
||||
List<VMInstanceVO> findVMInstancesLike(String name);
|
||||
|
||||
|
||||
List<VMInstanceVO> findVMInTransition(Date time, State... states);
|
||||
|
||||
List<VMInstanceVO> listByTypes(VirtualMachine.Type... types);
|
||||
|
||||
|
||||
VMInstanceVO findByIdTypes(long id, VirtualMachine.Type... types);
|
||||
|
||||
|
||||
VMInstanceVO findVMByInstanceName(String name);
|
||||
|
||||
void updateProxyId(long id, Long proxyId, Date time);
|
||||
|
||||
List<VMInstanceVO> listByHostIdTypes(long hostid, VirtualMachine.Type... types);
|
||||
|
||||
|
||||
List<VMInstanceVO> listUpByHostIdTypes(long hostid, VirtualMachine.Type... types);
|
||||
List<VMInstanceVO> listByZoneIdAndType(long zoneId, VirtualMachine.Type type);
|
||||
List<VMInstanceVO> listUpByHostId(Long hostId);
|
||||
List<VMInstanceVO> listByLastHostId(Long hostId);
|
||||
|
||||
|
||||
List<VMInstanceVO> listByTypeAndState(VirtualMachine.Type type, State state);
|
||||
|
||||
List<VMInstanceVO> listByAccountId(long accountId);
|
||||
|
|
@ -92,9 +92,9 @@ public interface VMInstanceDao extends GenericDao<VMInstanceVO, Long>, StateDao<
|
|||
|
||||
List<VMInstanceVO> listByClusterId(long clusterId); // this does not pull up VMs which are starting
|
||||
List<VMInstanceVO> listLHByClusterId(long clusterId); // get all the VMs even starting one on this cluster
|
||||
|
||||
|
||||
List<VMInstanceVO> listVmsMigratingFromHost(Long hostId);
|
||||
|
||||
|
||||
public Long countRunningByHostId(long hostId);
|
||||
|
||||
Pair<List<Long>, Map<Long, Double>> listClusterIdsInZoneByVmCount(long zoneId, long accountId);
|
||||
|
|
@ -106,7 +106,7 @@ public interface VMInstanceDao extends GenericDao<VMInstanceVO, Long>, StateDao<
|
|||
List<Long> listHostIdsByVmCount(long dcId, Long podId, Long clusterId, long accountId);
|
||||
|
||||
Long countRunningByAccount(long accountId);
|
||||
|
||||
|
||||
List<VMInstanceVO> listNonRemovedVmsByTypeAndNetwork(long networkId, VirtualMachine.Type... types);
|
||||
|
||||
/**
|
||||
|
|
@ -116,4 +116,8 @@ public interface VMInstanceDao extends GenericDao<VMInstanceVO, Long>, StateDao<
|
|||
*/
|
||||
List<String> listDistinctHostNames(long networkId, VirtualMachine.Type... types);
|
||||
|
||||
List<VMInstanceVO> findByHostInStates(Long hostId, State... states);
|
||||
|
||||
List<VMInstanceVO> listStartingWithNoHostId();
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@
|
|||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
|
|
@ -83,30 +83,32 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
protected GenericSearchBuilder<VMInstanceVO, Long> CountRunningByAccount;
|
||||
protected SearchBuilder<VMInstanceVO> NetworkTypeSearch;
|
||||
protected GenericSearchBuilder<VMInstanceVO, String> DistinctHostNameSearch;
|
||||
|
||||
protected SearchBuilder<VMInstanceVO> HostAndStateSearch;
|
||||
protected SearchBuilder<VMInstanceVO> StartingWithNoHostSearch;
|
||||
|
||||
@Inject ResourceTagDao _tagsDao;
|
||||
@Inject NicDao _nicDao;
|
||||
|
||||
|
||||
protected Attribute _updateTimeAttr;
|
||||
|
||||
private static final String ORDER_CLUSTERS_NUMBER_OF_VMS_FOR_ACCOUNT_PART1 =
|
||||
|
||||
private static final String ORDER_CLUSTERS_NUMBER_OF_VMS_FOR_ACCOUNT_PART1 =
|
||||
"SELECT host.cluster_id, SUM(IF(vm.state='Running' AND vm.account_id = ?, 1, 0)) FROM `cloud`.`host` host LEFT JOIN `cloud`.`vm_instance` vm ON host.id = vm.host_id WHERE ";
|
||||
private static final String ORDER_CLUSTERS_NUMBER_OF_VMS_FOR_ACCOUNT_PART2 =
|
||||
" AND host.type = 'Routing' GROUP BY host.cluster_id ORDER BY 2 ASC ";
|
||||
|
||||
|
||||
private static final String ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT = "SELECT pod.id, SUM(IF(vm.state='Running' AND vm.account_id = ?, 1, 0)) FROM `cloud`.`host_pod_ref` pod LEFT JOIN `cloud`.`vm_instance` vm ON pod.id = vm.pod_id WHERE pod.data_center_id = ? " +
|
||||
" GROUP BY pod.id ORDER BY 2 ASC ";
|
||||
|
||||
|
||||
private static final String ORDER_HOSTS_NUMBER_OF_VMS_FOR_ACCOUNT =
|
||||
"SELECT host.id, SUM(IF(vm.state='Running' AND vm.account_id = ?, 1, 0)) FROM `cloud`.`host` host LEFT JOIN `cloud`.`vm_instance` vm ON host.id = vm.host_id WHERE host.data_center_id = ? " +
|
||||
" AND host.pod_id = ? AND host.cluster_id = ? AND host.type = 'Routing' " +
|
||||
" GROUP BY host.id ORDER BY 2 ASC ";
|
||||
|
||||
@Inject protected HostDao _hostDao;
|
||||
|
||||
|
||||
public VMInstanceDaoImpl() {
|
||||
}
|
||||
|
||||
|
||||
@PostConstruct
|
||||
protected void init() {
|
||||
|
||||
|
|
@ -114,14 +116,14 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
IdStatesSearch.and("id", IdStatesSearch.entity().getId(), Op.EQ);
|
||||
IdStatesSearch.and("states", IdStatesSearch.entity().getState(), Op.IN);
|
||||
IdStatesSearch.done();
|
||||
|
||||
|
||||
VMClusterSearch = createSearchBuilder();
|
||||
SearchBuilder<HostVO> hostSearch = _hostDao.createSearchBuilder();
|
||||
VMClusterSearch.join("hostSearch", hostSearch, hostSearch.entity().getId(), VMClusterSearch.entity().getHostId(), JoinType.INNER);
|
||||
hostSearch.and("clusterId", hostSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
|
||||
VMClusterSearch.done();
|
||||
|
||||
|
||||
|
||||
LHVMClusterSearch = createSearchBuilder();
|
||||
SearchBuilder<HostVO> hostSearch1 = _hostDao.createSearchBuilder();
|
||||
LHVMClusterSearch.join("hostSearch1", hostSearch1, hostSearch1.entity().getId(), LHVMClusterSearch.entity().getLastHostId(), JoinType.INNER);
|
||||
|
|
@ -129,7 +131,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
hostSearch1.and("clusterId", hostSearch1.entity().getClusterId(), SearchCriteria.Op.EQ);
|
||||
LHVMClusterSearch.done();
|
||||
|
||||
|
||||
|
||||
AllFieldsSearch = createSearchBuilder();
|
||||
AllFieldsSearch.and("host", AllFieldsSearch.entity().getHostId(), Op.EQ);
|
||||
AllFieldsSearch.and("lastHost", AllFieldsSearch.entity().getLastHostId(), Op.EQ);
|
||||
|
|
@ -169,23 +171,23 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
IdTypesSearch.and("id", IdTypesSearch.entity().getId(), Op.EQ);
|
||||
IdTypesSearch.and("types", IdTypesSearch.entity().getType(), Op.IN);
|
||||
IdTypesSearch.done();
|
||||
|
||||
|
||||
HostIdTypesSearch = createSearchBuilder();
|
||||
HostIdTypesSearch.and("hostid", HostIdTypesSearch.entity().getHostId(), Op.EQ);
|
||||
HostIdTypesSearch.and("types", HostIdTypesSearch.entity().getType(), Op.IN);
|
||||
HostIdTypesSearch.done();
|
||||
|
||||
|
||||
HostIdUpTypesSearch = createSearchBuilder();
|
||||
HostIdUpTypesSearch.and("hostid", HostIdUpTypesSearch.entity().getHostId(), Op.EQ);
|
||||
HostIdUpTypesSearch.and("types", HostIdUpTypesSearch.entity().getType(), Op.IN);
|
||||
HostIdUpTypesSearch.and("states", HostIdUpTypesSearch.entity().getState(), Op.NIN);
|
||||
HostIdUpTypesSearch.done();
|
||||
|
||||
|
||||
HostUpSearch = createSearchBuilder();
|
||||
HostUpSearch.and("host", HostUpSearch.entity().getHostId(), Op.EQ);
|
||||
HostUpSearch.and("states", HostUpSearch.entity().getState(), Op.IN);
|
||||
HostUpSearch.done();
|
||||
|
||||
|
||||
InstanceNameSearch = createSearchBuilder();
|
||||
InstanceNameSearch.and("instanceName", InstanceNameSearch.entity().getInstanceName(), Op.EQ);
|
||||
InstanceNameSearch.done();
|
||||
|
|
@ -194,21 +196,31 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
CountVirtualRoutersByAccount.select(null, Func.COUNT, null);
|
||||
CountVirtualRoutersByAccount.and("account", CountVirtualRoutersByAccount.entity().getAccountId(), SearchCriteria.Op.EQ);
|
||||
CountVirtualRoutersByAccount.and("type", CountVirtualRoutersByAccount.entity().getType(), SearchCriteria.Op.EQ);
|
||||
CountVirtualRoutersByAccount.and("state", CountVirtualRoutersByAccount.entity().getState(), SearchCriteria.Op.NIN);
|
||||
CountVirtualRoutersByAccount.and("state", CountVirtualRoutersByAccount.entity().getState(), SearchCriteria.Op.NIN);
|
||||
CountVirtualRoutersByAccount.done();
|
||||
|
||||
|
||||
CountRunningByHost = createSearchBuilder(Long.class);
|
||||
CountRunningByHost.select(null, Func.COUNT, null);
|
||||
CountRunningByHost.and("host", CountRunningByHost.entity().getHostId(), SearchCriteria.Op.EQ);
|
||||
CountRunningByHost.and("state", CountRunningByHost.entity().getState(), SearchCriteria.Op.EQ);
|
||||
CountRunningByHost.done();
|
||||
CountRunningByHost.done();
|
||||
|
||||
CountRunningByAccount = createSearchBuilder(Long.class);
|
||||
CountRunningByAccount.select(null, Func.COUNT, null);
|
||||
CountRunningByAccount.and("account", CountRunningByAccount.entity().getAccountId(), SearchCriteria.Op.EQ);
|
||||
CountRunningByAccount.and("state", CountRunningByAccount.entity().getState(), SearchCriteria.Op.EQ);
|
||||
CountRunningByAccount.done();
|
||||
|
||||
CountRunningByAccount.done();
|
||||
|
||||
HostAndStateSearch = createSearchBuilder();
|
||||
HostAndStateSearch.and("host", HostAndStateSearch.entity().getHostId(), Op.EQ);
|
||||
HostAndStateSearch.and("states", HostAndStateSearch.entity().getState(), Op.IN);
|
||||
HostAndStateSearch.done();
|
||||
|
||||
StartingWithNoHostSearch = createSearchBuilder();
|
||||
StartingWithNoHostSearch.and("state", StartingWithNoHostSearch.entity().getState(), Op.EQ);
|
||||
StartingWithNoHostSearch.and("host", StartingWithNoHostSearch.entity().getHostId(), Op.NULL);
|
||||
StartingWithNoHostSearch.done();
|
||||
|
||||
_updateTimeAttr = _allAttributes.get("updateTime");
|
||||
assert _updateTimeAttr != null : "Couldn't get this updateTime attribute";
|
||||
}
|
||||
|
|
@ -219,7 +231,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
sc.setParameters("account", accountId);
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public List<VMInstanceVO> findVMInstancesLike(String name) {
|
||||
SearchCriteria<VMInstanceVO> sc = NameLikeSearch.create();
|
||||
|
|
@ -234,7 +246,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public List<VMInstanceVO> listByZoneId(long zoneId) {
|
||||
SearchCriteria<VMInstanceVO> sc = AllFieldsSearch.create();
|
||||
|
|
@ -242,7 +254,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public List<VMInstanceVO> listByPodId(long podId) {
|
||||
SearchCriteria<VMInstanceVO> sc = AllFieldsSearch.create();
|
||||
|
|
@ -263,7 +275,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
sc.setJoinParameters("hostSearch1", "clusterId", clusterId);
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public List<VMInstanceVO> listByZoneIdAndType(long zoneId, VirtualMachine.Type type) {
|
||||
SearchCriteria<VMInstanceVO> sc = AllFieldsSearch.create();
|
||||
|
|
@ -271,8 +283,8 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
sc.setParameters("type", type.toString());
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
@Override
|
||||
public List<VMInstanceVO> listNonExpungedByZoneAndTemplate(long zoneId, long templateId) {
|
||||
SearchCriteria<VMInstanceVO> sc = ZoneTemplateNonExpungedSearch.create();
|
||||
|
|
@ -310,7 +322,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
sc.setParameters("states", new Object[] {State.Destroyed, State.Stopped, State.Expunging});
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public List<VMInstanceVO> listUpByHostId(Long hostId) {
|
||||
SearchCriteria<VMInstanceVO> sc = HostUpSearch.create();
|
||||
|
|
@ -318,14 +330,14 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
sc.setParameters("states", new Object[] {State.Starting, State.Running});
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public List<VMInstanceVO> listByTypes(Type... types) {
|
||||
SearchCriteria<VMInstanceVO> sc = TypesSearch.create();
|
||||
sc.setParameters("types", (Object[]) types);
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public List<VMInstanceVO> listByTypeAndState(VirtualMachine.Type type, State state) {
|
||||
SearchCriteria<VMInstanceVO> sc = AllFieldsSearch.create();
|
||||
|
|
@ -348,7 +360,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
sc.setParameters("instanceName", name);
|
||||
return findOneBy(sc);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void updateProxyId(long id, Long proxyId, Date time) {
|
||||
VMInstanceVO vo = createForUpdate();
|
||||
|
|
@ -369,12 +381,12 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
@SuppressWarnings("unchecked")
|
||||
Pair<Long, Long> hosts = (Pair<Long,Long>)opaque;
|
||||
Long newHostId = hosts.second();
|
||||
|
||||
|
||||
VMInstanceVO vmi = (VMInstanceVO)vm;
|
||||
Long oldHostId = vmi.getHostId();
|
||||
Long oldUpdated = vmi.getUpdated();
|
||||
Date oldUpdateDate = vmi.getUpdateTime();
|
||||
|
||||
|
||||
SearchCriteria<VMInstanceVO> sc = StateChangeSearch.create();
|
||||
sc.setParameters("id", vmi.getId());
|
||||
sc.setParameters("states", oldState);
|
||||
|
|
@ -383,7 +395,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
|
||||
vmi.incrUpdated();
|
||||
UpdateBuilder ub = getUpdateBuilder(vmi);
|
||||
|
||||
|
||||
ub.set(vmi, "state", newState);
|
||||
ub.set(vmi, "hostId", newHostId);
|
||||
ub.set(vmi, "podIdToDeployIn", vmi.getPodIdToDeployIn());
|
||||
|
|
@ -393,7 +405,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
if (result == 0 && s_logger.isDebugEnabled()) {
|
||||
|
||||
VMInstanceVO vo = findByIdIncludingRemoved(vm.getId());
|
||||
|
||||
|
||||
if (vo != null) {
|
||||
StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString());
|
||||
str.append(": DB Data={Host=").append(vo.getHostId()).append("; State=").append(vo.getState().toString()).append("; updated=").append(vo.getUpdated()).append("; time=").append(vo.getUpdateTime());
|
||||
|
|
@ -407,7 +419,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
}
|
||||
return result > 0;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public List<VMInstanceVO> listByLastHostId(Long hostId) {
|
||||
SearchCriteria<VMInstanceVO> sc = AllFieldsSearch.create();
|
||||
|
|
@ -415,7 +427,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
sc.setParameters("state", State.Stopped);
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public Long countAllocatedVirtualRoutersForAccount(long accountId) {
|
||||
SearchCriteria<Long> sc = CountVirtualRoutersByAccount.create();
|
||||
|
|
@ -424,7 +436,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
sc.setParameters("state", new Object[] {State.Destroyed, State.Error, State.Expunging});
|
||||
return customSearch(sc, null).get(0);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public List<VMInstanceVO> listVmsMigratingFromHost(Long hostId) {
|
||||
SearchCriteria<VMInstanceVO> sc = AllFieldsSearch.create();
|
||||
|
|
@ -432,7 +444,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
sc.setParameters("state", State.Migrating);
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public Long countRunningByHostId(long hostId){
|
||||
SearchCriteria<Long> sc = CountRunningByHost.create();
|
||||
|
|
@ -455,7 +467,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
pstmt = txn.prepareAutoCloseStatement(sql.toString());
|
||||
pstmt.setLong(1, accountId);
|
||||
pstmt.setLong(2, zoneId);
|
||||
|
||||
|
||||
ResultSet rs = pstmt.executeQuery();
|
||||
while (rs.next()) {
|
||||
Long clusterId = rs.getLong(1);
|
||||
|
|
@ -484,11 +496,11 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
pstmt = txn.prepareAutoCloseStatement(sql.toString());
|
||||
pstmt.setLong(1, accountId);
|
||||
pstmt.setLong(2, podId);
|
||||
|
||||
|
||||
ResultSet rs = pstmt.executeQuery();
|
||||
while (rs.next()) {
|
||||
Long clusterId = rs.getLong(1);
|
||||
result.add(clusterId);
|
||||
result.add(clusterId);
|
||||
clusterVmCountMap.put(clusterId, rs.getDouble(2));
|
||||
}
|
||||
return new Pair<List<Long>, Map<Long, Double>>(result, clusterVmCountMap);
|
||||
|
|
@ -511,11 +523,11 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
pstmt = txn.prepareAutoCloseStatement(sql);
|
||||
pstmt.setLong(1, accountId);
|
||||
pstmt.setLong(2, dataCenterId);
|
||||
|
||||
|
||||
ResultSet rs = pstmt.executeQuery();
|
||||
while (rs.next()) {
|
||||
Long podId = rs.getLong(1);
|
||||
result.add(podId);
|
||||
result.add(podId);
|
||||
podVmCountMap.put(podId, rs.getDouble(2));
|
||||
}
|
||||
return new Pair<List<Long>, Map<Long, Double>>(result, podVmCountMap);
|
||||
|
|
@ -523,7 +535,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
throw new CloudRuntimeException("DB Exception on: " + ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT, e);
|
||||
} catch (Throwable e) {
|
||||
throw new CloudRuntimeException("Caught: " + ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -538,7 +550,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
pstmt.setLong(2, dcId);
|
||||
pstmt.setLong(3, podId);
|
||||
pstmt.setLong(4, clusterId);
|
||||
|
||||
|
||||
ResultSet rs = pstmt.executeQuery();
|
||||
while (rs.next()) {
|
||||
result.add(rs.getLong(1));
|
||||
|
|
@ -548,9 +560,9 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
throw new CloudRuntimeException("DB Exception on: " + ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT, e);
|
||||
} catch (Throwable e) {
|
||||
throw new CloudRuntimeException("Caught: " + ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public Long countRunningByAccount(long accountId){
|
||||
SearchCriteria<Long> sc = CountRunningByAccount.create();
|
||||
|
|
@ -558,18 +570,18 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
sc.setParameters("state", State.Running);
|
||||
return customSearch(sc, null).get(0);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public List<VMInstanceVO> listNonRemovedVmsByTypeAndNetwork(long networkId, VirtualMachine.Type... types) {
|
||||
if (NetworkTypeSearch == null) {
|
||||
|
||||
|
||||
SearchBuilder<NicVO> nicSearch = _nicDao.createSearchBuilder();
|
||||
nicSearch.and("networkId", nicSearch.entity().getNetworkId(), SearchCriteria.Op.EQ);
|
||||
|
||||
NetworkTypeSearch = createSearchBuilder();
|
||||
NetworkTypeSearch.and("types", NetworkTypeSearch.entity().getType(), SearchCriteria.Op.IN);
|
||||
NetworkTypeSearch.and("removed", NetworkTypeSearch.entity().getRemoved(), SearchCriteria.Op.NULL);
|
||||
NetworkTypeSearch.join("nicSearch", nicSearch, NetworkTypeSearch.entity().getId(),
|
||||
NetworkTypeSearch.join("nicSearch", nicSearch, NetworkTypeSearch.entity().getId(),
|
||||
nicSearch.entity().getInstanceId(), JoinBuilder.JoinType.INNER);
|
||||
NetworkTypeSearch.done();
|
||||
}
|
||||
|
|
@ -577,27 +589,27 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
SearchCriteria<VMInstanceVO> sc = NetworkTypeSearch.create();
|
||||
if (types != null && types.length != 0) {
|
||||
sc.setParameters("types", (Object[]) types);
|
||||
}
|
||||
}
|
||||
sc.setJoinParameters("nicSearch", "networkId", networkId);
|
||||
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@Override
|
||||
public List<String> listDistinctHostNames(long networkId, VirtualMachine.Type... types) {
|
||||
if (DistinctHostNameSearch == null) {
|
||||
|
||||
|
||||
SearchBuilder<NicVO> nicSearch = _nicDao.createSearchBuilder();
|
||||
nicSearch.and("networkId", nicSearch.entity().getNetworkId(), SearchCriteria.Op.EQ);
|
||||
|
||||
DistinctHostNameSearch = createSearchBuilder(String.class);
|
||||
DistinctHostNameSearch.selectField(DistinctHostNameSearch.entity().getHostName());
|
||||
|
||||
|
||||
DistinctHostNameSearch.and("types", DistinctHostNameSearch.entity().getType(), SearchCriteria.Op.IN);
|
||||
DistinctHostNameSearch.and("removed", DistinctHostNameSearch.entity().getRemoved(), SearchCriteria.Op.NULL);
|
||||
DistinctHostNameSearch.join("nicSearch", nicSearch, DistinctHostNameSearch.entity().getId(),
|
||||
DistinctHostNameSearch.join("nicSearch", nicSearch, DistinctHostNameSearch.entity().getId(),
|
||||
nicSearch.entity().getInstanceId(), JoinBuilder.JoinType.INNER);
|
||||
DistinctHostNameSearch.done();
|
||||
}
|
||||
|
|
@ -605,12 +617,12 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
SearchCriteria<String> sc = DistinctHostNameSearch.create();
|
||||
if (types != null && types.length != 0) {
|
||||
sc.setParameters("types", (Object[]) types);
|
||||
}
|
||||
}
|
||||
sc.setJoinParameters("nicSearch", "networkId", networkId);
|
||||
|
||||
return customSearch(sc, null);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
@DB
|
||||
public boolean remove(Long id) {
|
||||
|
|
@ -625,4 +637,19 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<VMInstanceVO> findByHostInStates(Long hostId, State... states) {
|
||||
SearchCriteria<VMInstanceVO> sc = HostAndStateSearch.create();
|
||||
sc.setParameters("host", hostId);
|
||||
sc.setParameters("states", (Object[]) states);
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<VMInstanceVO> listStartingWithNoHostId() {
|
||||
SearchCriteria<VMInstanceVO> sc = StartingWithNoHostSearch.create();
|
||||
sc.setParameters("state", State.Starting);
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -50,7 +50,7 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
|
|||
|
||||
@Override
|
||||
protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
|
||||
|
||||
|
||||
s_logger.debug("ClusterScopeStoragePoolAllocator looking for storage pool");
|
||||
List<StoragePool> suitablePools = new ArrayList<StoragePool>();
|
||||
|
||||
|
|
@ -65,6 +65,14 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
|
|||
}
|
||||
|
||||
List<StoragePoolVO> pools = _storagePoolDao.findPoolsByTags(dcId, podId, clusterId, dskCh.getTags());
|
||||
|
||||
// add remaining pools in cluster, that did not match tags, to avoid set
|
||||
List<StoragePoolVO> allPools = _storagePoolDao.findPoolsByTags(dcId, podId, clusterId, null);
|
||||
allPools.removeAll(pools);
|
||||
for (StoragePoolVO pool : allPools) {
|
||||
avoid.addPool(pool.getId());
|
||||
}
|
||||
|
||||
if (pools.size() == 0) {
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
String storageType = dskCh.useLocalStorage() ? ServiceOffering.StorageType.local.toString() : ServiceOffering.StorageType.shared.toString();
|
||||
|
|
@ -72,7 +80,7 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
|
|||
}
|
||||
return suitablePools;
|
||||
}
|
||||
|
||||
|
||||
for (StoragePoolVO pool: pools) {
|
||||
if(suitablePools.size() == returnUpTo){
|
||||
break;
|
||||
|
|
@ -80,13 +88,15 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
|
|||
StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
|
||||
if (filter(avoid, pol, dskCh, plan)) {
|
||||
suitablePools.add(pol);
|
||||
} else {
|
||||
avoid.addPool(pool.getId());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("FirstFitStoragePoolAllocator returning "+suitablePools.size() +" suitable storage pools");
|
||||
}
|
||||
|
||||
|
||||
return suitablePools;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -74,7 +74,7 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
|
|||
if (!dskCh.useLocalStorage()) {
|
||||
return suitablePools;
|
||||
}
|
||||
|
||||
|
||||
// data disk and host identified from deploying vm (attach volume case)
|
||||
if (dskCh.getType() == Volume.Type.DATADISK && plan.getHostId() != null) {
|
||||
List<StoragePoolHostVO> hostPools = _poolHostDao.listByHostId(plan.getHostId());
|
||||
|
|
@ -85,7 +85,9 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
|
|||
if (filter(avoid, pol, dskCh, plan)) {
|
||||
s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list");
|
||||
suitablePools.add(pol);
|
||||
}
|
||||
} else {
|
||||
avoid.addPool(pool.getId());
|
||||
}
|
||||
}
|
||||
|
||||
if (suitablePools.size() == returnUpTo) {
|
||||
|
|
@ -101,8 +103,19 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
|
|||
StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
|
||||
if (filter(avoid, pol, dskCh, plan)) {
|
||||
suitablePools.add(pol);
|
||||
}
|
||||
} else {
|
||||
avoid.addPool(pool.getId());
|
||||
}
|
||||
}
|
||||
|
||||
// add remaining pools in cluster, that did not match tags, to avoid
|
||||
// set
|
||||
List<StoragePoolVO> allPools = _storagePoolDao.findLocalStoragePoolsByTags(plan.getDataCenterId(),
|
||||
plan.getPodId(), plan.getClusterId(), null);
|
||||
allPools.removeAll(availablePools);
|
||||
for (StoragePoolVO pool : allPools) {
|
||||
avoid.addPool(pool.getId());
|
||||
}
|
||||
}
|
||||
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
|
|
@ -111,7 +124,7 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
|
|||
|
||||
return suitablePools;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
|
||||
super.configure(name, params);
|
||||
|
|
|
|||
|
|
@ -39,18 +39,18 @@ import com.cloud.vm.VirtualMachineProfile;
|
|||
@Component
|
||||
public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
|
||||
private static final Logger s_logger = Logger.getLogger(ZoneWideStoragePoolAllocator.class);
|
||||
@Inject PrimaryDataStoreDao _storagePoolDao;
|
||||
@Inject DataStoreManager dataStoreMgr;
|
||||
|
||||
@Inject PrimaryDataStoreDao _storagePoolDao;
|
||||
@Inject DataStoreManager dataStoreMgr;
|
||||
|
||||
@Override
|
||||
protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh,
|
||||
protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh,
|
||||
DeploymentPlan plan) {
|
||||
Volume volume = _volumeDao.findById(dskCh.getVolumeId());
|
||||
List<Volume> requestVolumes = new ArrayList<Volume>();
|
||||
requestVolumes.add(volume);
|
||||
return storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
protected List<StoragePool> select(DiskProfile dskCh,
|
||||
VirtualMachineProfile<? extends VirtualMachine> vmProfile,
|
||||
|
|
@ -64,9 +64,16 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
|
|||
return suitablePools;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
List<StoragePoolVO> storagePools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), dskCh.getTags());
|
||||
|
||||
|
||||
// add remaining pools in zone, that did not match tags, to avoid set
|
||||
List<StoragePoolVO> allPools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), null);
|
||||
allPools.removeAll(storagePools);
|
||||
for (StoragePoolVO pool : allPools) {
|
||||
avoid.addPool(pool.getId());
|
||||
}
|
||||
|
||||
for (StoragePoolVO storage : storagePools) {
|
||||
if (suitablePools.size() == returnUpTo) {
|
||||
break;
|
||||
|
|
@ -74,7 +81,9 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
|
|||
StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(storage.getId());
|
||||
if (filter(avoid, pol, dskCh, plan)) {
|
||||
suitablePools.add(pol);
|
||||
}
|
||||
} else {
|
||||
avoid.addPool(pol.getId());
|
||||
}
|
||||
}
|
||||
return suitablePools;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -748,11 +748,11 @@ public class VolumeServiceImpl implements VolumeService {
|
|||
protected Void registerVolumeCallback(AsyncCallbackDispatcher<VolumeServiceImpl, CreateCmdResult> callback, CreateVolumeContext<VolumeApiResult> context) {
|
||||
CreateCmdResult result = callback.getResult();
|
||||
VolumeObject vo = (VolumeObject)context.volume;
|
||||
/*if (result.isFailed()) {
|
||||
if (result.isFailed()) {
|
||||
vo.stateTransit(Volume.Event.OperationFailed);
|
||||
} else {
|
||||
vo.stateTransit(Volume.Event.OperationSucceeded);
|
||||
}*/
|
||||
}
|
||||
VolumeApiResult res = new VolumeApiResult(vo);
|
||||
context.future.complete(res);
|
||||
return null;
|
||||
|
|
|
|||
|
|
@ -0,0 +1,29 @@
|
|||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing,
|
||||
software distributed under the License is distributed on an
|
||||
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations
|
||||
under the License.
|
||||
-->
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<artifactId>cloud-plugin-planner-implicit-dedication</artifactId>
|
||||
<name>Apache CloudStack Plugin - Implicit Dedication Planner</name>
|
||||
<parent>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
<artifactId>cloudstack-plugins</artifactId>
|
||||
<version>4.2.0-SNAPSHOT</version>
|
||||
<relativePath>../../pom.xml</relativePath>
|
||||
</parent>
|
||||
</project>
|
||||
|
|
@ -0,0 +1,249 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.deploy;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import javax.ejb.Local;
|
||||
import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.configuration.Config;
|
||||
import com.cloud.exception.InsufficientServerCapacityException;
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.resource.ResourceManager;
|
||||
import com.cloud.service.ServiceOfferingVO;
|
||||
import com.cloud.service.dao.ServiceOfferingDao;
|
||||
import com.cloud.service.dao.ServiceOfferingDetailsDao;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.utils.DateUtil;
|
||||
import com.cloud.utils.NumbersUtil;
|
||||
import com.cloud.vm.UserVmVO;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachineProfile;
|
||||
|
||||
@Local(value=DeploymentPlanner.class)
|
||||
public class ImplicitDedicationPlanner extends FirstFitPlanner implements DeploymentClusterPlanner {
|
||||
|
||||
private static final Logger s_logger = Logger.getLogger(ImplicitDedicationPlanner.class);
|
||||
|
||||
@Inject
|
||||
private ServiceOfferingDao serviceOfferingDao;
|
||||
@Inject
|
||||
private ServiceOfferingDetailsDao serviceOfferingDetailsDao;
|
||||
@Inject
|
||||
private ResourceManager resourceMgr;
|
||||
|
||||
private int capacityReleaseInterval;
|
||||
|
||||
@Override
|
||||
public boolean configure(final String name, final Map<String, Object> params) throws ConfigurationException {
|
||||
super.configure(name, params);
|
||||
capacityReleaseInterval = NumbersUtil.parseInt(_configDao.getValue(Config.CapacitySkipcountingHours.key()), 3600);
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Long> orderClusters(VirtualMachineProfile<? extends VirtualMachine> vmProfile,
|
||||
DeploymentPlan plan, ExcludeList avoid) throws InsufficientServerCapacityException {
|
||||
List<Long> clusterList = super.orderClusters(vmProfile, plan, avoid);
|
||||
Set<Long> hostsToAvoid = avoid.getHostsToAvoid();
|
||||
Account account = vmProfile.getOwner();
|
||||
|
||||
if (clusterList == null || clusterList.isEmpty()) {
|
||||
return clusterList;
|
||||
}
|
||||
|
||||
// Check if strict or preferred mode should be used.
|
||||
boolean preferred = isServiceOfferingUsingPlannerInPreferredMode(vmProfile.getServiceOfferingId());
|
||||
|
||||
// Get the list of all the hosts in the given clusters
|
||||
List<Long> allHosts = new ArrayList<Long>();
|
||||
for (Long cluster : clusterList) {
|
||||
List<HostVO> hostsInCluster = resourceMgr.listAllHostsInCluster(cluster);
|
||||
for (HostVO hostVO : hostsInCluster) {
|
||||
allHosts.add(hostVO.getId());
|
||||
}
|
||||
}
|
||||
|
||||
// Go over all the hosts in the cluster and get a list of
|
||||
// 1. All empty hosts, not running any vms.
|
||||
// 2. Hosts running vms for this account and created by a service offering which uses an
|
||||
// implicit dedication planner.
|
||||
// 3. Hosts running vms created by implicit planner and in strict mode of other accounts.
|
||||
// 4. Hosts running vms from other account or from this account but created by a service offering which uses
|
||||
// any planner besides implicit.
|
||||
Set<Long> emptyHosts = new HashSet<Long>();
|
||||
Set<Long> hostRunningVmsOfAccount = new HashSet<Long>();
|
||||
Set<Long> hostRunningStrictImplicitVmsOfOtherAccounts = new HashSet<Long>();
|
||||
Set<Long> allOtherHosts = new HashSet<Long>();
|
||||
for (Long host : allHosts) {
|
||||
List<UserVmVO> userVms = getVmsOnHost(host);
|
||||
if (userVms == null || userVms.isEmpty()) {
|
||||
emptyHosts.add(host);
|
||||
} else if (checkHostSuitabilityForImplicitDedication(account.getAccountId(), userVms)) {
|
||||
hostRunningVmsOfAccount.add(host);
|
||||
} else if (checkIfAllVmsCreatedInStrictMode(account.getAccountId(), userVms)) {
|
||||
hostRunningStrictImplicitVmsOfOtherAccounts.add(host);
|
||||
} else {
|
||||
allOtherHosts.add(host);
|
||||
}
|
||||
}
|
||||
|
||||
// Hosts running vms of other accounts created by ab implicit planner in strict mode should always be avoided.
|
||||
avoid.addHostList(hostRunningStrictImplicitVmsOfOtherAccounts);
|
||||
|
||||
if (!hostRunningVmsOfAccount.isEmpty() && (hostsToAvoid == null ||
|
||||
!hostsToAvoid.containsAll(hostRunningVmsOfAccount))) {
|
||||
// Check if any of hosts that are running implicit dedicated vms are available (not in avoid list).
|
||||
// If so, we'll try and use these hosts.
|
||||
avoid.addHostList(emptyHosts);
|
||||
avoid.addHostList(allOtherHosts);
|
||||
clusterList = getUpdatedClusterList(clusterList, avoid.getHostsToAvoid());
|
||||
} else if (!emptyHosts.isEmpty() && (hostsToAvoid == null || !hostsToAvoid.containsAll(emptyHosts))) {
|
||||
// If there aren't implicit resources try on empty hosts
|
||||
avoid.addHostList(allOtherHosts);
|
||||
clusterList = getUpdatedClusterList(clusterList, avoid.getHostsToAvoid());
|
||||
} else if (!preferred) {
|
||||
// If in strict mode, there is nothing else to try.
|
||||
clusterList = null;
|
||||
} else {
|
||||
// If in preferred mode, check if hosts are available to try, otherwise return an empty cluster list.
|
||||
if (!allOtherHosts.isEmpty() && (hostsToAvoid == null || !hostsToAvoid.containsAll(allOtherHosts))) {
|
||||
clusterList = getUpdatedClusterList(clusterList, avoid.getHostsToAvoid());
|
||||
} else {
|
||||
clusterList = null;
|
||||
}
|
||||
}
|
||||
|
||||
return clusterList;
|
||||
}
|
||||
|
||||
private List<UserVmVO> getVmsOnHost(long hostId) {
|
||||
List<UserVmVO> vms = _vmDao.listUpByHostId(hostId);
|
||||
List<UserVmVO> vmsByLastHostId = _vmDao.listByLastHostId(hostId);
|
||||
if (vmsByLastHostId.size() > 0) {
|
||||
// check if any VMs are within skip.counting.hours, if yes we have to consider the host.
|
||||
for (UserVmVO stoppedVM : vmsByLastHostId) {
|
||||
long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - stoppedVM.getUpdateTime()
|
||||
.getTime()) / 1000;
|
||||
if (secondsSinceLastUpdate < capacityReleaseInterval) {
|
||||
vms.add(stoppedVM);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return vms;
|
||||
}
|
||||
|
||||
private boolean checkHostSuitabilityForImplicitDedication(Long accountId, List<UserVmVO> allVmsOnHost) {
|
||||
boolean suitable = true;
|
||||
for (UserVmVO vm : allVmsOnHost) {
|
||||
if (vm.getAccountId() != accountId) {
|
||||
s_logger.info("Host " + vm.getHostId() + " found to be unsuitable for implicit dedication as it is " +
|
||||
"running instances of another account");
|
||||
suitable = false;
|
||||
break;
|
||||
} else {
|
||||
if (!isImplicitPlannerUsedByOffering(vm.getServiceOfferingId())) {
|
||||
s_logger.info("Host " + vm.getHostId() + " found to be unsuitable for implicit dedication as it " +
|
||||
"is running instances of this account which haven't been created using implicit dedication.");
|
||||
suitable = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
return suitable;
|
||||
}
|
||||
|
||||
private boolean checkIfAllVmsCreatedInStrictMode(Long accountId, List<UserVmVO> allVmsOnHost) {
|
||||
boolean createdByImplicitStrict = true;
|
||||
for (UserVmVO vm : allVmsOnHost) {
|
||||
if (!isImplicitPlannerUsedByOffering(vm.getServiceOfferingId())) {
|
||||
s_logger.info("Host " + vm.getHostId() + " found to be running a vm created by a planner other" +
|
||||
" than implicit.");
|
||||
createdByImplicitStrict = false;
|
||||
break;
|
||||
} else if (isServiceOfferingUsingPlannerInPreferredMode(vm.getServiceOfferingId())) {
|
||||
s_logger.info("Host " + vm.getHostId() + " found to be running a vm created by an implicit planner" +
|
||||
" in preferred mode.");
|
||||
createdByImplicitStrict = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return createdByImplicitStrict;
|
||||
}
|
||||
|
||||
private boolean isImplicitPlannerUsedByOffering(long offeringId) {
|
||||
boolean implicitPlannerUsed = false;
|
||||
ServiceOfferingVO offering = serviceOfferingDao.findByIdIncludingRemoved(offeringId);
|
||||
if (offering == null) {
|
||||
s_logger.error("Couldn't retrieve the offering by the given id : " + offeringId);
|
||||
} else {
|
||||
String plannerName = offering.getDeploymentPlanner();
|
||||
if (plannerName == null) {
|
||||
plannerName = _globalDeploymentPlanner;
|
||||
}
|
||||
|
||||
if (plannerName != null && this.getName().equals(plannerName)) {
|
||||
implicitPlannerUsed = true;
|
||||
}
|
||||
}
|
||||
|
||||
return implicitPlannerUsed;
|
||||
}
|
||||
|
||||
private boolean isServiceOfferingUsingPlannerInPreferredMode(long serviceOfferingId) {
|
||||
boolean preferred = false;
|
||||
Map<String, String> details = serviceOfferingDetailsDao.findDetails(serviceOfferingId);
|
||||
if (details != null && !details.isEmpty()) {
|
||||
String preferredAttribute = details.get("ImplicitDedicationMode");
|
||||
if (preferredAttribute != null && preferredAttribute.equals("Preferred")) {
|
||||
preferred = true;
|
||||
}
|
||||
}
|
||||
return preferred;
|
||||
}
|
||||
|
||||
private List<Long> getUpdatedClusterList(List<Long> clusterList, Set<Long> hostsSet) {
|
||||
List<Long> updatedClusterList = new ArrayList<Long>();
|
||||
for (Long cluster : clusterList) {
|
||||
List<HostVO> hosts = resourceMgr.listAllHostsInCluster(cluster);
|
||||
Set<Long> hostsInClusterSet = new HashSet<Long>();
|
||||
for (HostVO host : hosts) {
|
||||
hostsInClusterSet.add(host.getId());
|
||||
}
|
||||
|
||||
if (!hostsSet.containsAll(hostsInClusterSet)) {
|
||||
updatedClusterList.add(cluster);
|
||||
}
|
||||
}
|
||||
|
||||
return updatedClusterList;
|
||||
}
|
||||
|
||||
@Override
|
||||
public PlannerResourceUsage getResourceUsage() {
|
||||
return PlannerResourceUsage.Dedicated;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,586 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.implicitplanner;
|
||||
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.fail;
|
||||
import static org.mockito.Matchers.anyString;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.cloudstack.test.utils.SpringUtils;
|
||||
import org.junit.Before;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.Mockito;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.ComponentScan;
|
||||
import org.springframework.context.annotation.ComponentScan.Filter;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.context.annotation.FilterType;
|
||||
import org.springframework.core.type.classreading.MetadataReader;
|
||||
import org.springframework.core.type.classreading.MetadataReaderFactory;
|
||||
import org.springframework.core.type.filter.TypeFilter;
|
||||
import org.springframework.test.context.ContextConfiguration;
|
||||
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
|
||||
import org.springframework.test.context.support.AnnotationConfigContextLoader;
|
||||
|
||||
import com.cloud.capacity.CapacityManager;
|
||||
import com.cloud.capacity.CapacityVO;
|
||||
import com.cloud.capacity.dao.CapacityDao;
|
||||
import com.cloud.configuration.dao.ConfigurationDao;
|
||||
import com.cloud.dc.ClusterDetailsDao;
|
||||
import com.cloud.dc.DataCenterVO;
|
||||
import com.cloud.dc.dao.ClusterDao;
|
||||
import com.cloud.dc.dao.DataCenterDao;
|
||||
import com.cloud.dc.dao.HostPodDao;
|
||||
import com.cloud.deploy.DataCenterDeployment;
|
||||
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
|
||||
import com.cloud.deploy.ImplicitDedicationPlanner;
|
||||
import com.cloud.exception.InsufficientServerCapacityException;
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.host.dao.HostDao;
|
||||
import com.cloud.resource.ResourceManager;
|
||||
import com.cloud.service.ServiceOfferingVO;
|
||||
import com.cloud.service.dao.ServiceOfferingDao;
|
||||
import com.cloud.service.dao.ServiceOfferingDetailsDao;
|
||||
import com.cloud.storage.StorageManager;
|
||||
import com.cloud.storage.dao.DiskOfferingDao;
|
||||
import com.cloud.storage.dao.GuestOSCategoryDao;
|
||||
import com.cloud.storage.dao.GuestOSDao;
|
||||
import com.cloud.storage.dao.StoragePoolHostDao;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.user.AccountManager;
|
||||
import com.cloud.user.AccountVO;
|
||||
import com.cloud.user.UserContext;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.component.ComponentContext;
|
||||
import com.cloud.vm.UserVmVO;
|
||||
import com.cloud.vm.VMInstanceVO;
|
||||
import com.cloud.vm.VirtualMachineProfileImpl;
|
||||
import com.cloud.vm.dao.UserVmDao;
|
||||
import com.cloud.vm.dao.VMInstanceDao;
|
||||
|
||||
@RunWith(SpringJUnit4ClassRunner.class)
|
||||
@ContextConfiguration(loader = AnnotationConfigContextLoader.class)
|
||||
public class ImplicitPlannerTest {
|
||||
|
||||
@Inject
|
||||
ImplicitDedicationPlanner planner = new ImplicitDedicationPlanner();
|
||||
@Inject
|
||||
HostDao hostDao;
|
||||
@Inject
|
||||
DataCenterDao dcDao;
|
||||
@Inject
|
||||
HostPodDao podDao;
|
||||
@Inject
|
||||
ClusterDao clusterDao;
|
||||
@Inject
|
||||
GuestOSDao guestOSDao;
|
||||
@Inject
|
||||
GuestOSCategoryDao guestOSCategoryDao;
|
||||
@Inject
|
||||
DiskOfferingDao diskOfferingDao;
|
||||
@Inject
|
||||
StoragePoolHostDao poolHostDao;
|
||||
@Inject
|
||||
UserVmDao vmDao;
|
||||
@Inject
|
||||
VMInstanceDao vmInstanceDao;
|
||||
@Inject
|
||||
VolumeDao volsDao;
|
||||
@Inject
|
||||
CapacityManager capacityMgr;
|
||||
@Inject
|
||||
ConfigurationDao configDao;
|
||||
@Inject
|
||||
PrimaryDataStoreDao storagePoolDao;
|
||||
@Inject
|
||||
CapacityDao capacityDao;
|
||||
@Inject
|
||||
AccountManager accountMgr;
|
||||
@Inject
|
||||
StorageManager storageMgr;
|
||||
@Inject
|
||||
DataStoreManager dataStoreMgr;
|
||||
@Inject
|
||||
ClusterDetailsDao clusterDetailsDao;
|
||||
@Inject
|
||||
ServiceOfferingDao serviceOfferingDao;
|
||||
@Inject
|
||||
ServiceOfferingDetailsDao serviceOfferingDetailsDao;
|
||||
@Inject
|
||||
ResourceManager resourceMgr;
|
||||
|
||||
private static long domainId = 5L;
|
||||
long dataCenterId = 1L;
|
||||
long accountId = 200L;
|
||||
long offeringId = 12L;
|
||||
int noOfCpusInOffering = 1;
|
||||
int cpuSpeedInOffering = 500;
|
||||
int ramInOffering = 512;
|
||||
AccountVO acct = new AccountVO(accountId);
|
||||
|
||||
@BeforeClass
|
||||
public static void setUp() throws ConfigurationException {
|
||||
}
|
||||
|
||||
@Before
|
||||
public void testSetUp() {
|
||||
ComponentContext.initComponentsLifeCycle();
|
||||
|
||||
acct.setType(Account.ACCOUNT_TYPE_NORMAL);
|
||||
acct.setAccountName("user1");
|
||||
acct.setDomainId(domainId);
|
||||
acct.setId(accountId);
|
||||
|
||||
UserContext.registerContext(1, acct, null, true);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void checkWhenDcInAvoidList() throws InsufficientServerCapacityException {
|
||||
DataCenterVO mockDc = mock(DataCenterVO.class);
|
||||
ExcludeList avoids = mock(ExcludeList.class);
|
||||
@SuppressWarnings("unchecked")
|
||||
VirtualMachineProfileImpl<VMInstanceVO> vmProfile = mock(VirtualMachineProfileImpl.class);
|
||||
VMInstanceVO vm = mock(VMInstanceVO.class);
|
||||
DataCenterDeployment plan = mock(DataCenterDeployment.class);
|
||||
|
||||
when(avoids.shouldAvoid(mockDc)).thenReturn(true);
|
||||
when(vmProfile.getVirtualMachine()).thenReturn(vm);
|
||||
when(vm.getDataCenterId()).thenReturn(1L);
|
||||
when(dcDao.findById(1L)).thenReturn(mockDc);
|
||||
|
||||
List<Long> clusterList = planner.orderClusters(vmProfile, plan, avoids);
|
||||
assertTrue("Cluster list should be null/empty if the dc is in avoid list",
|
||||
(clusterList == null || clusterList.isEmpty()));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void checkStrictModeWithCurrentAccountVmsPresent() throws InsufficientServerCapacityException {
|
||||
@SuppressWarnings("unchecked")
|
||||
VirtualMachineProfileImpl<VMInstanceVO> vmProfile = mock(VirtualMachineProfileImpl.class);
|
||||
DataCenterDeployment plan = mock(DataCenterDeployment.class);
|
||||
ExcludeList avoids = new ExcludeList();
|
||||
|
||||
initializeForTest(vmProfile, plan);
|
||||
|
||||
initializeForImplicitPlannerTest(false);
|
||||
|
||||
List<Long> clusterList = planner.orderClusters(vmProfile, plan, avoids);
|
||||
|
||||
// Validations.
|
||||
// Check cluster 2 and 3 are not in the cluster list.
|
||||
// Host 6 and 7 should also be in avoid list.
|
||||
assertFalse("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty()));
|
||||
boolean foundNeededCluster = false;
|
||||
for (Long cluster : clusterList) {
|
||||
if (cluster != 1) {
|
||||
fail("Found a cluster that shouldn't have been present, cluster id : " + cluster);
|
||||
}else {
|
||||
foundNeededCluster = true;
|
||||
}
|
||||
}
|
||||
assertTrue("Didn't find cluster 1 in the list. It should have been present", foundNeededCluster);
|
||||
|
||||
Set<Long> hostsInAvoidList = avoids.getHostsToAvoid();
|
||||
assertFalse("Host 5 shouldn't have be in the avoid list, but it is present", hostsInAvoidList.contains(5L));
|
||||
Set<Long> hostsThatShouldBeInAvoidList = new HashSet<Long>();
|
||||
hostsThatShouldBeInAvoidList.add(6L);
|
||||
hostsThatShouldBeInAvoidList.add(7L);
|
||||
assertTrue("Hosts 6 and 7 that should have been present were not found in avoid list" ,
|
||||
hostsInAvoidList.containsAll(hostsThatShouldBeInAvoidList));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void checkStrictModeHostWithCurrentAccountVmsFull() throws InsufficientServerCapacityException {
|
||||
@SuppressWarnings("unchecked")
|
||||
VirtualMachineProfileImpl<VMInstanceVO> vmProfile = mock(VirtualMachineProfileImpl.class);
|
||||
DataCenterDeployment plan = mock(DataCenterDeployment.class);
|
||||
ExcludeList avoids = new ExcludeList();
|
||||
|
||||
initializeForTest(vmProfile, plan);
|
||||
|
||||
initializeForImplicitPlannerTest(false);
|
||||
|
||||
// Mark the host 5 with current account vms to be in avoid list.
|
||||
avoids.addHost(5L);
|
||||
List<Long> clusterList = planner.orderClusters(vmProfile, plan, avoids);
|
||||
|
||||
// Validations.
|
||||
// Check cluster 1 and 3 are not in the cluster list.
|
||||
// Host 5 and 7 should also be in avoid list.
|
||||
assertFalse("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty()));
|
||||
boolean foundNeededCluster = false;
|
||||
for (Long cluster : clusterList) {
|
||||
if (cluster != 2) {
|
||||
fail("Found a cluster that shouldn't have been present, cluster id : " + cluster);
|
||||
}else {
|
||||
foundNeededCluster = true;
|
||||
}
|
||||
}
|
||||
assertTrue("Didn't find cluster 2 in the list. It should have been present", foundNeededCluster);
|
||||
|
||||
Set<Long> hostsInAvoidList = avoids.getHostsToAvoid();
|
||||
assertFalse("Host 6 shouldn't have be in the avoid list, but it is present", hostsInAvoidList.contains(6L));
|
||||
Set<Long> hostsThatShouldBeInAvoidList = new HashSet<Long>();
|
||||
hostsThatShouldBeInAvoidList.add(5L);
|
||||
hostsThatShouldBeInAvoidList.add(7L);
|
||||
assertTrue("Hosts 5 and 7 that should have been present were not found in avoid list" ,
|
||||
hostsInAvoidList.containsAll(hostsThatShouldBeInAvoidList));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void checkStrictModeNoHostsAvailable() throws InsufficientServerCapacityException {
|
||||
@SuppressWarnings("unchecked")
|
||||
VirtualMachineProfileImpl<VMInstanceVO> vmProfile = mock(VirtualMachineProfileImpl.class);
|
||||
DataCenterDeployment plan = mock(DataCenterDeployment.class);
|
||||
ExcludeList avoids = new ExcludeList();
|
||||
|
||||
initializeForTest(vmProfile, plan);
|
||||
|
||||
initializeForImplicitPlannerTest(false);
|
||||
|
||||
// Mark the host 5 and 6 to be in avoid list.
|
||||
avoids.addHost(5L);
|
||||
avoids.addHost(6L);
|
||||
List<Long> clusterList = planner.orderClusters(vmProfile, plan, avoids);
|
||||
|
||||
// Validations.
|
||||
// Check cluster list is empty.
|
||||
assertTrue("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty()));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void checkPreferredModePreferredHostAvailable() throws InsufficientServerCapacityException {
|
||||
@SuppressWarnings("unchecked")
|
||||
VirtualMachineProfileImpl<VMInstanceVO> vmProfile = mock(VirtualMachineProfileImpl.class);
|
||||
DataCenterDeployment plan = mock(DataCenterDeployment.class);
|
||||
ExcludeList avoids = new ExcludeList();
|
||||
|
||||
initializeForTest(vmProfile, plan);
|
||||
|
||||
initializeForImplicitPlannerTest(true);
|
||||
|
||||
// Mark the host 5 and 6 to be in avoid list.
|
||||
avoids.addHost(5L);
|
||||
avoids.addHost(6L);
|
||||
List<Long> clusterList = planner.orderClusters(vmProfile, plan, avoids);
|
||||
|
||||
// Validations.
|
||||
// Check cluster 1 and 2 are not in the cluster list.
|
||||
// Host 5 and 6 should also be in avoid list.
|
||||
assertFalse("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty()));
|
||||
boolean foundNeededCluster = false;
|
||||
for (Long cluster : clusterList) {
|
||||
if (cluster != 3) {
|
||||
fail("Found a cluster that shouldn't have been present, cluster id : " + cluster);
|
||||
} else {
|
||||
foundNeededCluster = true;
|
||||
}
|
||||
}
|
||||
assertTrue("Didn't find cluster 3 in the list. It should have been present", foundNeededCluster);
|
||||
|
||||
Set<Long> hostsInAvoidList = avoids.getHostsToAvoid();
|
||||
assertFalse("Host 7 shouldn't have be in the avoid list, but it is present", hostsInAvoidList.contains(7L));
|
||||
Set<Long> hostsThatShouldBeInAvoidList = new HashSet<Long>();
|
||||
hostsThatShouldBeInAvoidList.add(5L);
|
||||
hostsThatShouldBeInAvoidList.add(6L);
|
||||
assertTrue("Hosts 5 and 6 that should have been present were not found in avoid list" ,
|
||||
hostsInAvoidList.containsAll(hostsThatShouldBeInAvoidList));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void checkPreferredModeNoHostsAvailable() throws InsufficientServerCapacityException {
|
||||
@SuppressWarnings("unchecked")
|
||||
VirtualMachineProfileImpl<VMInstanceVO> vmProfile = mock(VirtualMachineProfileImpl.class);
|
||||
DataCenterDeployment plan = mock(DataCenterDeployment.class);
|
||||
ExcludeList avoids = new ExcludeList();
|
||||
|
||||
initializeForTest(vmProfile, plan);
|
||||
|
||||
initializeForImplicitPlannerTest(false);
|
||||
|
||||
// Mark the host 5, 6 and 7 to be in avoid list.
|
||||
avoids.addHost(5L);
|
||||
avoids.addHost(6L);
|
||||
avoids.addHost(7L);
|
||||
List<Long> clusterList = planner.orderClusters(vmProfile, plan, avoids);
|
||||
|
||||
// Validations.
|
||||
// Check cluster list is empty.
|
||||
assertTrue("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty()));
|
||||
}
|
||||
|
||||
private void initializeForTest(VirtualMachineProfileImpl<VMInstanceVO> vmProfile, DataCenterDeployment plan) {
|
||||
DataCenterVO mockDc = mock(DataCenterVO.class);
|
||||
VMInstanceVO vm = mock(VMInstanceVO.class);
|
||||
UserVmVO userVm = mock(UserVmVO.class);
|
||||
ServiceOfferingVO offering = mock(ServiceOfferingVO.class);
|
||||
|
||||
AccountVO account = mock(AccountVO.class);
|
||||
when(account.getId()).thenReturn(accountId);
|
||||
when(account.getAccountId()).thenReturn(accountId);
|
||||
when(vmProfile.getOwner()).thenReturn(account);
|
||||
when(vmProfile.getVirtualMachine()).thenReturn(vm);
|
||||
when(vmProfile.getId()).thenReturn(12L);
|
||||
when(vmDao.findById(12L)).thenReturn(userVm);
|
||||
when(userVm.getAccountId()).thenReturn(accountId);
|
||||
|
||||
when(vm.getDataCenterId()).thenReturn(dataCenterId);
|
||||
when(dcDao.findById(1L)).thenReturn(mockDc);
|
||||
when(plan.getDataCenterId()).thenReturn(dataCenterId);
|
||||
when(plan.getClusterId()).thenReturn(null);
|
||||
when(plan.getPodId()).thenReturn(null);
|
||||
when(configDao.getValue(anyString())).thenReturn("false").thenReturn("CPU");
|
||||
|
||||
// Mock offering details.
|
||||
when(vmProfile.getServiceOffering()).thenReturn(offering);
|
||||
when(offering.getId()).thenReturn(offeringId);
|
||||
when(vmProfile.getServiceOfferingId()).thenReturn(offeringId);
|
||||
when(offering.getCpu()).thenReturn(noOfCpusInOffering);
|
||||
when(offering.getSpeed()).thenReturn(cpuSpeedInOffering);
|
||||
when(offering.getRamSize()).thenReturn(ramInOffering);
|
||||
|
||||
List<Long> clustersWithEnoughCapacity = new ArrayList<Long>();
|
||||
clustersWithEnoughCapacity.add(1L);
|
||||
clustersWithEnoughCapacity.add(2L);
|
||||
clustersWithEnoughCapacity.add(3L);
|
||||
when(capacityDao.listClustersInZoneOrPodByHostCapacities(dataCenterId, noOfCpusInOffering * cpuSpeedInOffering,
|
||||
ramInOffering * 1024L * 1024L, CapacityVO.CAPACITY_TYPE_CPU, true)).thenReturn(clustersWithEnoughCapacity);
|
||||
|
||||
Map<Long, Double> clusterCapacityMap = new HashMap<Long, Double>();
|
||||
clusterCapacityMap.put(1L, 2048D);
|
||||
clusterCapacityMap.put(2L, 2048D);
|
||||
clusterCapacityMap.put(3L, 2048D);
|
||||
Pair<List<Long>, Map<Long, Double>> clustersOrderedByCapacity =
|
||||
new Pair<List<Long>, Map<Long, Double>>(clustersWithEnoughCapacity, clusterCapacityMap);
|
||||
when(capacityDao.orderClustersByAggregateCapacity(dataCenterId, CapacityVO.CAPACITY_TYPE_CPU,
|
||||
true)).thenReturn(clustersOrderedByCapacity);
|
||||
|
||||
List<Long> disabledClusters = new ArrayList<Long>();
|
||||
List<Long> clustersWithDisabledPods = new ArrayList<Long>();
|
||||
when(clusterDao.listDisabledClusters(dataCenterId, null)).thenReturn(disabledClusters);
|
||||
when(clusterDao.listClustersWithDisabledPods(dataCenterId)).thenReturn(clustersWithDisabledPods);
|
||||
}
|
||||
|
||||
private void initializeForImplicitPlannerTest(boolean preferred) {
|
||||
String plannerMode = new String("Strict");
|
||||
if (preferred) {
|
||||
plannerMode = new String("Preferred");
|
||||
}
|
||||
|
||||
Map<String, String> details = new HashMap<String, String>();
|
||||
details.put("ImplicitDedicationMode", plannerMode);
|
||||
when(serviceOfferingDetailsDao.findDetails(offeringId)).thenReturn(details);
|
||||
|
||||
// Initialize hosts in clusters
|
||||
HostVO host1 = mock(HostVO.class);
|
||||
when(host1.getId()).thenReturn(5L);
|
||||
HostVO host2 = mock(HostVO.class);
|
||||
when(host2.getId()).thenReturn(6L);
|
||||
HostVO host3 = mock(HostVO.class);
|
||||
when(host3.getId()).thenReturn(7L);
|
||||
List<HostVO> hostsInCluster1 = new ArrayList<HostVO>();
|
||||
List<HostVO> hostsInCluster2 = new ArrayList<HostVO>();
|
||||
List<HostVO> hostsInCluster3 = new ArrayList<HostVO>();
|
||||
hostsInCluster1.add(host1);
|
||||
hostsInCluster2.add(host2);
|
||||
hostsInCluster3.add(host3);
|
||||
when(resourceMgr.listAllHostsInCluster(1)).thenReturn(hostsInCluster1);
|
||||
when(resourceMgr.listAllHostsInCluster(2)).thenReturn(hostsInCluster2);
|
||||
when(resourceMgr.listAllHostsInCluster(3)).thenReturn(hostsInCluster3);
|
||||
|
||||
// Mock vms on each host.
|
||||
long offeringIdForVmsOfThisAccount = 15L;
|
||||
long offeringIdForVmsOfOtherAccount = 16L;
|
||||
UserVmVO vm1 = mock(UserVmVO.class);
|
||||
when(vm1.getAccountId()).thenReturn(accountId);
|
||||
when(vm1.getServiceOfferingId()).thenReturn(offeringIdForVmsOfThisAccount);
|
||||
UserVmVO vm2 = mock(UserVmVO.class);
|
||||
when(vm2.getAccountId()).thenReturn(accountId);
|
||||
when(vm2.getServiceOfferingId()).thenReturn(offeringIdForVmsOfThisAccount);
|
||||
// Vm from different account
|
||||
UserVmVO vm3 = mock(UserVmVO.class);
|
||||
when(vm3.getAccountId()).thenReturn(201L);
|
||||
when(vm3.getServiceOfferingId()).thenReturn(offeringIdForVmsOfOtherAccount);
|
||||
List<UserVmVO> userVmsForHost1 = new ArrayList<UserVmVO>();
|
||||
List<UserVmVO> userVmsForHost2 = new ArrayList<UserVmVO>();
|
||||
List<UserVmVO> userVmsForHost3 = new ArrayList<UserVmVO>();
|
||||
List<UserVmVO> stoppedVmsForHost = new ArrayList<UserVmVO>();
|
||||
// Host 2 is empty.
|
||||
userVmsForHost1.add(vm1);
|
||||
userVmsForHost1.add(vm2);
|
||||
userVmsForHost3.add(vm3);
|
||||
when(vmDao.listUpByHostId(5L)).thenReturn(userVmsForHost1);
|
||||
when(vmDao.listUpByHostId(6L)).thenReturn(userVmsForHost2);
|
||||
when(vmDao.listUpByHostId(7L)).thenReturn(userVmsForHost3);
|
||||
when(vmDao.listByLastHostId(5L)).thenReturn(stoppedVmsForHost);
|
||||
when(vmDao.listByLastHostId(6L)).thenReturn(stoppedVmsForHost);
|
||||
when(vmDao.listByLastHostId(7L)).thenReturn(stoppedVmsForHost);
|
||||
|
||||
// Mock the offering with which the vm was created.
|
||||
ServiceOfferingVO offeringForVmOfThisAccount = mock(ServiceOfferingVO.class);
|
||||
when(serviceOfferingDao.findByIdIncludingRemoved(offeringIdForVmsOfThisAccount)).thenReturn(offeringForVmOfThisAccount);
|
||||
when(offeringForVmOfThisAccount.getDeploymentPlanner()).thenReturn(planner.getName());
|
||||
|
||||
ServiceOfferingVO offeringForVMOfOtherAccount = mock(ServiceOfferingVO.class);
|
||||
when(serviceOfferingDao.findByIdIncludingRemoved(offeringIdForVmsOfOtherAccount)).thenReturn(offeringForVMOfOtherAccount);
|
||||
when(offeringForVMOfOtherAccount.getDeploymentPlanner()).thenReturn("FirstFitPlanner");
|
||||
}
|
||||
|
||||
@Configuration
|
||||
@ComponentScan(basePackageClasses = { ImplicitDedicationPlanner.class },
|
||||
includeFilters = {@Filter(value = TestConfiguration.Library.class, type = FilterType.CUSTOM)},
|
||||
useDefaultFilters = false)
|
||||
public static class TestConfiguration extends SpringUtils.CloudStackTestConfiguration {
|
||||
|
||||
@Bean
|
||||
public HostDao hostDao() {
|
||||
return Mockito.mock(HostDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public DataCenterDao dcDao() {
|
||||
return Mockito.mock(DataCenterDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public HostPodDao hostPodDao() {
|
||||
return Mockito.mock(HostPodDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public ClusterDao clusterDao() {
|
||||
return Mockito.mock(ClusterDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public GuestOSDao guestOsDao() {
|
||||
return Mockito.mock(GuestOSDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public GuestOSCategoryDao guestOsCategoryDao() {
|
||||
return Mockito.mock(GuestOSCategoryDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public DiskOfferingDao diskOfferingDao() {
|
||||
return Mockito.mock(DiskOfferingDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public StoragePoolHostDao storagePoolHostDao() {
|
||||
return Mockito.mock(StoragePoolHostDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public UserVmDao userVmDao() {
|
||||
return Mockito.mock(UserVmDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public VMInstanceDao vmInstanceDao() {
|
||||
return Mockito.mock(VMInstanceDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public VolumeDao volumeDao() {
|
||||
return Mockito.mock(VolumeDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public CapacityManager capacityManager() {
|
||||
return Mockito.mock(CapacityManager.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public ConfigurationDao configurationDao() {
|
||||
return Mockito.mock(ConfigurationDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public PrimaryDataStoreDao primaryDataStoreDao() {
|
||||
return Mockito.mock(PrimaryDataStoreDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public CapacityDao capacityDao() {
|
||||
return Mockito.mock(CapacityDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public AccountManager accountManager() {
|
||||
return Mockito.mock(AccountManager.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public StorageManager storageManager() {
|
||||
return Mockito.mock(StorageManager.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public DataStoreManager dataStoreManager() {
|
||||
return Mockito.mock(DataStoreManager.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public ClusterDetailsDao clusterDetailsDao() {
|
||||
return Mockito.mock(ClusterDetailsDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public ServiceOfferingDao serviceOfferingDao() {
|
||||
return Mockito.mock(ServiceOfferingDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public ServiceOfferingDetailsDao serviceOfferingDetailsDao() {
|
||||
return Mockito.mock(ServiceOfferingDetailsDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public ResourceManager resourceManager() {
|
||||
return Mockito.mock(ResourceManager.class);
|
||||
}
|
||||
|
||||
public static class Library implements TypeFilter {
|
||||
@Override
|
||||
public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException {
|
||||
ComponentScan cs = TestConfiguration.class.getAnnotation(ComponentScan.class);
|
||||
return SpringUtils.includedInBasePackageClasses(mdr.getClassMetadata().getClassName(), cs);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -11,7 +11,7 @@
|
|||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.deploy;
|
||||
|
|
@ -24,18 +24,17 @@ import javax.ejb.Local;
|
|||
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachineProfile;
|
||||
|
||||
@Local(value=DeploymentPlanner.class)
|
||||
public class UserConcentratedPodPlanner extends FirstFitPlanner implements DeploymentPlanner {
|
||||
public class UserConcentratedPodPlanner extends FirstFitPlanner implements DeploymentClusterPlanner {
|
||||
|
||||
private static final Logger s_logger = Logger.getLogger(UserConcentratedPodPlanner.class);
|
||||
|
||||
|
||||
/**
|
||||
* This method should reorder the given list of Cluster Ids by applying any necessary heuristic
|
||||
* This method should reorder the given list of Cluster Ids by applying any necessary heuristic
|
||||
* for this planner
|
||||
* For UserConcentratedPodPlanner we need to order the clusters in a zone across pods, by considering those pods first which have more number of VMs for this account
|
||||
* This reordering is not done incase the clusters within single pod are passed when the allocation is applied at pod-level.
|
||||
|
|
@ -49,7 +48,7 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo
|
|||
}
|
||||
return applyUserConcentrationPodHeuristicToClusters(id, clusterIdsByCapacity, vmProfile.getOwner().getAccountId());
|
||||
}
|
||||
|
||||
|
||||
private List<Long> applyUserConcentrationPodHeuristicToClusters(long zoneId, List<Long> prioritizedClusterIds, long accountId){
|
||||
//user has VMs in certain pods. - prioritize those pods first
|
||||
//UserConcentratedPod strategy
|
||||
|
|
@ -61,8 +60,8 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo
|
|||
clusterList = prioritizedClusterIds;
|
||||
}
|
||||
return clusterList;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private List<Long> reorderClustersByPods(List<Long> clusterIds, List<Long> podIds) {
|
||||
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
|
|
@ -111,11 +110,11 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo
|
|||
|
||||
return prioritizedPods;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* This method should reorder the given list of Pod Ids by applying any necessary heuristic
|
||||
* This method should reorder the given list of Pod Ids by applying any necessary heuristic
|
||||
* for this planner
|
||||
* For UserConcentratedPodPlanner we need to order the pods by considering those pods first which have more number of VMs for this account
|
||||
* For UserConcentratedPodPlanner we need to order the pods by considering those pods first which have more number of VMs for this account
|
||||
* @return List<Long> ordered list of Pod Ids
|
||||
*/
|
||||
@Override
|
||||
|
|
@ -124,7 +123,7 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo
|
|||
if(vmProfile.getOwner() == null){
|
||||
return podIdsByCapacity;
|
||||
}
|
||||
long accountId = vmProfile.getOwner().getAccountId();
|
||||
long accountId = vmProfile.getOwner().getAccountId();
|
||||
|
||||
//user has VMs in certain pods. - prioritize those pods first
|
||||
//UserConcentratedPod strategy
|
||||
|
|
@ -138,18 +137,7 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo
|
|||
}else{
|
||||
return podIdsByCapacity;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean canHandle(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan, ExcludeList avoid) {
|
||||
if(vm.getHypervisorType() != HypervisorType.BareMetal){
|
||||
//check the allocation strategy
|
||||
if (_allocationAlgorithm != null && (_allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_random.toString()) || _allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_firstfit.toString()))){
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -29,14 +29,13 @@ import javax.naming.ConfigurationException;
|
|||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.configuration.Config;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.utils.NumbersUtil;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachineProfile;
|
||||
|
||||
@Local(value=DeploymentPlanner.class)
|
||||
public class UserDispersingPlanner extends FirstFitPlanner implements DeploymentPlanner {
|
||||
public class UserDispersingPlanner extends FirstFitPlanner implements DeploymentClusterPlanner {
|
||||
|
||||
private static final Logger s_logger = Logger.getLogger(UserDispersingPlanner.class);
|
||||
|
||||
|
|
@ -191,17 +190,6 @@ public class UserDispersingPlanner extends FirstFitPlanner implements Deployment
|
|||
}
|
||||
|
||||
|
||||
@Override
|
||||
public boolean canHandle(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan, ExcludeList avoid) {
|
||||
if(vm.getHypervisorType() != HypervisorType.BareMetal){
|
||||
//check the allocation strategy
|
||||
if (_allocationAlgorithm != null && _allocationAlgorithm.equals(AllocationAlgorithm.userdispersing.toString())) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
float _userDispersionWeight;
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -2842,7 +2842,7 @@ ServerResource {
|
|||
Pair<Double, Double> nicStats = getNicStats(_publicBridgeName);
|
||||
|
||||
HostStatsEntry hostStats = new HostStatsEntry(cmd.getHostId(), cpuUtil,
|
||||
nicStats.first() / 1000, nicStats.second() / 1000, "host",
|
||||
nicStats.first() / 1024, nicStats.second() / 1024, "host",
|
||||
totMem, freeMem, 0, 0);
|
||||
return new GetHostStatsAnswer(cmd, hostStats);
|
||||
}
|
||||
|
|
@ -3581,6 +3581,7 @@ ServerResource {
|
|||
List<DiskDef> disks = null;
|
||||
Domain dm = null;
|
||||
DiskDef diskdef = null;
|
||||
KVMStoragePool attachingPool = attachingDisk.getPool();
|
||||
try {
|
||||
if (!attach) {
|
||||
dm = conn.domainLookupByUUID(UUID.nameUUIDFromBytes(vmName
|
||||
|
|
@ -3605,7 +3606,12 @@ ServerResource {
|
|||
}
|
||||
} else {
|
||||
diskdef = new DiskDef();
|
||||
if (attachingDisk.getFormat() == PhysicalDiskFormat.QCOW2) {
|
||||
if (attachingPool.getType() == StoragePoolType.RBD) {
|
||||
diskdef.defNetworkBasedDisk(attachingDisk.getPath(),
|
||||
attachingPool.getSourceHost(), attachingPool.getSourcePort(),
|
||||
attachingPool.getAuthUserName(), attachingPool.getUuid(), devId,
|
||||
DiskDef.diskBus.VIRTIO, diskProtocol.RBD);
|
||||
} else if (attachingDisk.getFormat() == PhysicalDiskFormat.QCOW2) {
|
||||
diskdef.defFileBasedDisk(attachingDisk.getPath(), devId,
|
||||
DiskDef.diskBus.VIRTIO, DiskDef.diskFmtType.QCOW2);
|
||||
} else if (attachingDisk.getFormat() == PhysicalDiskFormat.RAW) {
|
||||
|
|
@ -4561,10 +4567,10 @@ ServerResource {
|
|||
if (oldStats != null) {
|
||||
long deltarx = rx - oldStats._rx;
|
||||
if (deltarx > 0)
|
||||
stats.setNetworkReadKBs(deltarx / 1000);
|
||||
stats.setNetworkReadKBs(deltarx / 1024);
|
||||
long deltatx = tx - oldStats._tx;
|
||||
if (deltatx > 0)
|
||||
stats.setNetworkWriteKBs(deltatx / 1000);
|
||||
stats.setNetworkWriteKBs(deltatx / 1024);
|
||||
}
|
||||
|
||||
vmStats newStat = new vmStats();
|
||||
|
|
|
|||
|
|
@ -16,6 +16,32 @@
|
|||
// under the License.
|
||||
package com.cloud.hypervisor.vmware.resource;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.net.ConnectException;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.URI;
|
||||
import java.nio.channels.SocketChannel;
|
||||
import java.rmi.RemoteException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
import java.util.Date;
|
||||
import java.util.GregorianCalendar;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
import java.util.TimeZone;
|
||||
import java.util.UUID;
|
||||
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import org.apache.log4j.Logger;
|
||||
import org.apache.log4j.NDC;
|
||||
|
||||
import com.cloud.agent.IAgentControl;
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.AttachIsoCommand;
|
||||
|
|
@ -47,7 +73,6 @@ import com.cloud.agent.api.CreateVolumeFromSnapshotCommand;
|
|||
import com.cloud.agent.api.DeleteStoragePoolCommand;
|
||||
import com.cloud.agent.api.DeleteVMSnapshotAnswer;
|
||||
import com.cloud.agent.api.DeleteVMSnapshotCommand;
|
||||
import com.cloud.agent.api.UnregisterVMCommand;
|
||||
import com.cloud.agent.api.GetDomRVersionAnswer;
|
||||
import com.cloud.agent.api.GetDomRVersionCmd;
|
||||
import com.cloud.agent.api.GetHostStatsAnswer;
|
||||
|
|
@ -78,6 +103,7 @@ import com.cloud.agent.api.PlugNicCommand;
|
|||
import com.cloud.agent.api.PoolEjectCommand;
|
||||
import com.cloud.agent.api.PrepareForMigrationAnswer;
|
||||
import com.cloud.agent.api.PrepareForMigrationCommand;
|
||||
import com.cloud.agent.api.PvlanSetupCommand;
|
||||
import com.cloud.agent.api.ReadyAnswer;
|
||||
import com.cloud.agent.api.ReadyCommand;
|
||||
import com.cloud.agent.api.RebootAnswer;
|
||||
|
|
@ -85,8 +111,8 @@ import com.cloud.agent.api.RebootCommand;
|
|||
import com.cloud.agent.api.RebootRouterCommand;
|
||||
import com.cloud.agent.api.RevertToVMSnapshotAnswer;
|
||||
import com.cloud.agent.api.RevertToVMSnapshotCommand;
|
||||
import com.cloud.agent.api.ScaleVmCommand;
|
||||
import com.cloud.agent.api.ScaleVmAnswer;
|
||||
import com.cloud.agent.api.ScaleVmCommand;
|
||||
import com.cloud.agent.api.SetupAnswer;
|
||||
import com.cloud.agent.api.SetupCommand;
|
||||
import com.cloud.agent.api.SetupGuestNetworkAnswer;
|
||||
|
|
@ -101,6 +127,7 @@ import com.cloud.agent.api.StopCommand;
|
|||
import com.cloud.agent.api.StoragePoolInfo;
|
||||
import com.cloud.agent.api.UnPlugNicAnswer;
|
||||
import com.cloud.agent.api.UnPlugNicCommand;
|
||||
import com.cloud.agent.api.UnregisterVMCommand;
|
||||
import com.cloud.agent.api.UpgradeSnapshotCommand;
|
||||
import com.cloud.agent.api.ValidateSnapshotAnswer;
|
||||
import com.cloud.agent.api.ValidateSnapshotCommand;
|
||||
|
|
@ -135,14 +162,14 @@ import com.cloud.agent.api.routing.VmDataCommand;
|
|||
import com.cloud.agent.api.routing.VpnUsersCfgCommand;
|
||||
import com.cloud.agent.api.storage.CopyVolumeAnswer;
|
||||
import com.cloud.agent.api.storage.CopyVolumeCommand;
|
||||
import com.cloud.agent.api.storage.CreateVolumeOVACommand;
|
||||
import com.cloud.agent.api.storage.CreateVolumeOVAAnswer;
|
||||
import com.cloud.agent.api.storage.PrepareOVAPackingAnswer;
|
||||
import com.cloud.agent.api.storage.PrepareOVAPackingCommand;
|
||||
import com.cloud.agent.api.storage.CreateAnswer;
|
||||
import com.cloud.agent.api.storage.CreateCommand;
|
||||
import com.cloud.agent.api.storage.CreatePrivateTemplateAnswer;
|
||||
import com.cloud.agent.api.storage.CreateVolumeOVAAnswer;
|
||||
import com.cloud.agent.api.storage.CreateVolumeOVACommand;
|
||||
import com.cloud.agent.api.storage.DestroyCommand;
|
||||
import com.cloud.agent.api.storage.PrepareOVAPackingAnswer;
|
||||
import com.cloud.agent.api.storage.PrepareOVAPackingCommand;
|
||||
import com.cloud.agent.api.storage.PrimaryStorageDownloadAnswer;
|
||||
import com.cloud.agent.api.storage.PrimaryStorageDownloadCommand;
|
||||
import com.cloud.agent.api.storage.ResizeVolumeAnswer;
|
||||
|
|
@ -250,30 +277,6 @@ import com.vmware.vim25.VirtualMachineGuestOsIdentifier;
|
|||
import com.vmware.vim25.VirtualMachinePowerState;
|
||||
import com.vmware.vim25.VirtualMachineRuntimeInfo;
|
||||
import com.vmware.vim25.VirtualSCSISharing;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.apache.log4j.NDC;
|
||||
|
||||
import javax.naming.ConfigurationException;
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.net.ConnectException;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.URI;
|
||||
import java.nio.channels.SocketChannel;
|
||||
import java.rmi.RemoteException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
import java.util.Date;
|
||||
import java.util.GregorianCalendar;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
import java.util.TimeZone;
|
||||
import java.util.UUID;
|
||||
|
||||
|
||||
public class VmwareResource implements StoragePoolResource, ServerResource, VmwareHostService {
|
||||
|
|
@ -495,6 +498,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
return execute((UnregisterVMCommand) cmd);
|
||||
} else if (clz == ScaleVmCommand.class) {
|
||||
return execute((ScaleVmCommand) cmd);
|
||||
} else if (clz == PvlanSetupCommand.class) {
|
||||
return execute((PvlanSetupCommand) cmd);
|
||||
} else {
|
||||
answer = Answer.createUnsupportedCommandAnswer(cmd);
|
||||
}
|
||||
|
|
@ -1037,7 +1042,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
String domrGIP = cmd.getAccessDetail(NetworkElementCommand.ROUTER_GUEST_IP);
|
||||
String domrName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME);
|
||||
String gw = cmd.getAccessDetail(NetworkElementCommand.GUEST_NETWORK_GATEWAY);
|
||||
String cidr = Long.toString(NetUtils.getCidrSize(nic.getNetmask()));;
|
||||
String cidr = Long.toString(NetUtils.getCidrSize(nic.getNetmask()));
|
||||
String domainName = cmd.getNetworkDomain();
|
||||
String dns = cmd.getDefaultDns1();
|
||||
if (dns == null || dns.isEmpty()) {
|
||||
|
|
@ -1376,7 +1381,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
|
||||
NicTO nicTo = cmd.getNic();
|
||||
VirtualDevice nic;
|
||||
Pair<ManagedObjectReference, String> networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, false);
|
||||
Pair<ManagedObjectReference, String> networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, false, cmd.getVMType());;
|
||||
if (VmwareHelper.isDvPortGroup(networkInfo.first())) {
|
||||
String dvSwitchUuid;
|
||||
ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter();
|
||||
|
|
@ -1643,7 +1648,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
vmMo.getRunningHost(), vlanId, null, null, this._ops_timeout, true);
|
||||
} else {
|
||||
networkInfo = HypervisorHostHelper.prepareNetwork(this._publicTrafficInfo.getVirtualSwitchName(), "cloud.public",
|
||||
vmMo.getRunningHost(), vlanId, null, null, this._ops_timeout, vSwitchType, _portsPerDvPortGroup, null, false);
|
||||
vmMo.getRunningHost(), vlanId, null, null, null, this._ops_timeout, vSwitchType, _portsPerDvPortGroup, null, false);
|
||||
}
|
||||
|
||||
int nicIndex = allocPublicNicIndex(vmMo);
|
||||
|
|
@ -2537,7 +2542,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
s_logger.info("Prepare NIC device based on NicTO: " + _gson.toJson(nicTo));
|
||||
|
||||
boolean configureVServiceInNexus = (nicTo.getType() == TrafficType.Guest) && (vmSpec.getDetails().containsKey("ConfigureVServiceInNexus"));
|
||||
Pair<ManagedObjectReference, String> networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, configureVServiceInNexus);
|
||||
VirtualMachine.Type vmType = cmd.getVirtualMachine().getType();
|
||||
Pair<ManagedObjectReference, String> networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, configureVServiceInNexus, vmType);
|
||||
if (VmwareHelper.isDvPortGroup(networkInfo.first())) {
|
||||
String dvSwitchUuid;
|
||||
ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter();
|
||||
|
|
@ -2719,16 +2725,28 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
return poolMors;
|
||||
}
|
||||
|
||||
|
||||
private String getPvlanInfo(NicTO nicTo) {
|
||||
if (nicTo.getBroadcastType() == BroadcastDomainType.Pvlan) {
|
||||
return NetUtils.getIsolatedPvlanFromUri(nicTo.getBroadcastUri());
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private String getVlanInfo(NicTO nicTo, String defaultVlan) {
|
||||
if (nicTo.getBroadcastType() == BroadcastDomainType.Native) {
|
||||
return defaultVlan;
|
||||
}
|
||||
|
||||
if (nicTo.getBroadcastType() == BroadcastDomainType.Vlan) {
|
||||
if (nicTo.getBroadcastType() == BroadcastDomainType.Vlan || nicTo.getBroadcastType() == BroadcastDomainType.Pvlan) {
|
||||
if (nicTo.getBroadcastUri() != null) {
|
||||
if (nicTo.getBroadcastType() == BroadcastDomainType.Vlan)
|
||||
// For vlan, the broadcast uri is of the form vlan://<vlanid>
|
||||
return nicTo.getBroadcastUri().getHost();
|
||||
else
|
||||
// for pvlan, the broacast uri will be of the form pvlan://<vlanid>-i<pvlanid>
|
||||
return NetUtils.getPrimaryPvlanFromUri(nicTo.getBroadcastUri());
|
||||
} else {
|
||||
s_logger.warn("BroadcastType is not claimed as VLAN, but without vlan info in broadcast URI. Use vlan info from labeling: " + defaultVlan);
|
||||
s_logger.warn("BroadcastType is not claimed as VLAN or PVLAN, but without vlan info in broadcast URI. Use vlan info from labeling: " + defaultVlan);
|
||||
return defaultVlan;
|
||||
}
|
||||
}
|
||||
|
|
@ -2737,7 +2755,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
return defaultVlan;
|
||||
}
|
||||
|
||||
private Pair<ManagedObjectReference, String> prepareNetworkFromNicInfo(HostMO hostMo, NicTO nicTo, boolean configureVServiceInNexus) throws Exception {
|
||||
private Pair<ManagedObjectReference, String> prepareNetworkFromNicInfo(HostMO hostMo, NicTO nicTo, boolean configureVServiceInNexus, VirtualMachine.Type vmType) throws Exception {
|
||||
Pair<String, String> switchName;
|
||||
TrafficType trafficType;
|
||||
VirtualSwitchType switchType;
|
||||
|
|
@ -2761,12 +2779,22 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
s_logger.info("Prepare network on " + switchType + " " + switchName + " with name prefix: " + namePrefix);
|
||||
|
||||
if (VirtualSwitchType.StandardVirtualSwitch == switchType) {
|
||||
networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, hostMo, getVlanInfo(nicTo, switchName.second()),
|
||||
nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout,
|
||||
networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix,
|
||||
hostMo, getVlanInfo(nicTo, switchName.second()), nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout,
|
||||
!namePrefix.startsWith("cloud.private"));
|
||||
}
|
||||
else {
|
||||
networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, hostMo, getVlanInfo(nicTo, switchName.second()),
|
||||
String vlanId = getVlanInfo(nicTo, switchName.second());
|
||||
String svlanId = null;
|
||||
boolean pvlannetwork = (getPvlanInfo(nicTo) == null)?false:true;
|
||||
if (vmType != null && vmType.equals(VirtualMachine.Type.DomainRouter) && pvlannetwork) {
|
||||
// plumb this network to the promiscuous vlan.
|
||||
svlanId = vlanId;
|
||||
} else {
|
||||
// plumb this network to the isolated vlan.
|
||||
svlanId = getPvlanInfo(nicTo);
|
||||
}
|
||||
networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, hostMo, vlanId, svlanId,
|
||||
nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout, switchType, _portsPerDvPortGroup, nicTo.getGateway(), configureVServiceInNexus);
|
||||
}
|
||||
|
||||
|
|
@ -3253,7 +3281,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
NicTO[] nics = vm.getNics();
|
||||
for (NicTO nic : nics) {
|
||||
// prepare network on the host
|
||||
prepareNetworkFromNicInfo(new HostMO(getServiceContext(), _morHyperHost), nic, false);
|
||||
prepareNetworkFromNicInfo(new HostMO(getServiceContext(), _morHyperHost), nic, false, cmd.getVirtualMachine().getType());
|
||||
}
|
||||
|
||||
String secStoreUrl = mgr.getSecondaryStorageStoreUrl(Long.parseLong(_dcId));
|
||||
|
|
@ -3917,6 +3945,14 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
}
|
||||
}
|
||||
|
||||
protected Answer execute(PvlanSetupCommand cmd) {
|
||||
// Pvlan related operations are performed in the start/stop command paths
|
||||
// for vmware. This function is implemented to support mgmt layer code
|
||||
// that issue this command. Note that pvlan operations are supported only
|
||||
// in Distributed Virtual Switch environments for vmware deployments.
|
||||
return new Answer(cmd, true, "success");
|
||||
}
|
||||
|
||||
protected Answer execute(UnregisterVMCommand cmd){
|
||||
if (s_logger.isInfoEnabled()) {
|
||||
s_logger.info("Executing resource UnregisterVMCommand: " + _gson.toJson(cmd));
|
||||
|
|
@ -4134,6 +4170,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public CreateVolumeOVAAnswer execute(CreateVolumeOVACommand cmd) {
|
||||
if (s_logger.isInfoEnabled()) {
|
||||
s_logger.info("Executing resource CreateVolumeOVACommand: " + _gson.toJson(cmd));
|
||||
|
|
|
|||
|
|
@ -38,6 +38,7 @@
|
|||
<module>affinity-group-processors/host-anti-affinity</module>
|
||||
<module>deployment-planners/user-concentrated-pod</module>
|
||||
<module>deployment-planners/user-dispersing</module>
|
||||
<module>deployment-planners/implicit-dedication</module>
|
||||
<module>host-allocators/random</module>
|
||||
<module>hypervisors/ovm</module>
|
||||
<module>hypervisors/xen</module>
|
||||
|
|
|
|||
|
|
@ -90,6 +90,11 @@
|
|||
<artifactId>cloud-api</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
<artifactId>cloud-framework-ipc</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
<artifactId>cloud-framework-events</artifactId>
|
||||
|
|
|
|||
|
|
@ -78,7 +78,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
|
|||
@Inject ConsoleProxyDao _consoleProxyDao = null;
|
||||
@Inject SecondaryStorageVmDao _secStorgaeVmDao = null;
|
||||
@Inject ConfigurationDao _configDao = null;
|
||||
@Inject GuestOSDao _guestOSDao = null;
|
||||
@Inject GuestOSDao _guestOSDao = null;
|
||||
@Inject GuestOSCategoryDao _guestOSCategoryDao = null;
|
||||
@Inject VMInstanceDao _vmInstanceDao = null;
|
||||
@Inject ResourceManager _resourceMgr;
|
||||
|
|
@ -88,17 +88,17 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
|
|||
boolean _checkHvm = true;
|
||||
protected String _allocationAlgorithm = "random";
|
||||
@Inject CapacityManager _capacityMgr;
|
||||
|
||||
|
||||
|
||||
|
||||
@Override
|
||||
public List<Host> allocateTo(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, Type type,
|
||||
ExcludeList avoid, int returnUpTo) {
|
||||
return allocateTo(vmProfile, plan, type, avoid, returnUpTo, true);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public List<Host> allocateTo(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, Type type, ExcludeList avoid, int returnUpTo, boolean considerReservedCapacity) {
|
||||
|
||||
|
||||
long dcId = plan.getDataCenterId();
|
||||
Long podId = plan.getPodId();
|
||||
Long clusterId = plan.getClusterId();
|
||||
|
|
@ -110,19 +110,19 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
|
|||
// FirstFitAllocator should be used for user VMs only since it won't care whether the host is capable of routing or not
|
||||
return new ArrayList<Host>();
|
||||
}
|
||||
|
||||
|
||||
if(s_logger.isDebugEnabled()){
|
||||
s_logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId );
|
||||
}
|
||||
|
||||
|
||||
String hostTagOnOffering = offering.getHostTag();
|
||||
String hostTagOnTemplate = template.getTemplateTag();
|
||||
|
||||
|
||||
boolean hasSvcOfferingTag = hostTagOnOffering != null ? true : false;
|
||||
boolean hasTemplateTag = hostTagOnTemplate != null ? true : false;
|
||||
|
||||
|
||||
List<HostVO> clusterHosts = new ArrayList<HostVO>();
|
||||
|
||||
|
||||
String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag);
|
||||
if (haVmTag != null) {
|
||||
clusterHosts = _hostDao.listByHostTag(type, clusterId, podId, dcId, haVmTag);
|
||||
|
|
@ -133,31 +133,31 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
|
|||
List<HostVO> hostsMatchingOfferingTag = new ArrayList<HostVO>();
|
||||
List<HostVO> hostsMatchingTemplateTag = new ArrayList<HostVO>();
|
||||
if (hasSvcOfferingTag){
|
||||
if (s_logger.isDebugEnabled()){
|
||||
if (s_logger.isDebugEnabled()){
|
||||
s_logger.debug("Looking for hosts having tag specified on SvcOffering:" + hostTagOnOffering);
|
||||
}
|
||||
hostsMatchingOfferingTag = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnOffering);
|
||||
if (s_logger.isDebugEnabled()){
|
||||
if (s_logger.isDebugEnabled()){
|
||||
s_logger.debug("Hosts with tag '" + hostTagOnOffering + "' are:" + hostsMatchingOfferingTag);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (hasTemplateTag){
|
||||
if (s_logger.isDebugEnabled()){
|
||||
if (s_logger.isDebugEnabled()){
|
||||
s_logger.debug("Looking for hosts having tag specified on Template:" + hostTagOnTemplate);
|
||||
}
|
||||
hostsMatchingTemplateTag = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnTemplate);
|
||||
if (s_logger.isDebugEnabled()){
|
||||
hostsMatchingTemplateTag = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnTemplate);
|
||||
if (s_logger.isDebugEnabled()){
|
||||
s_logger.debug("Hosts with tag '" + hostTagOnTemplate+"' are:" + hostsMatchingTemplateTag);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if (hasSvcOfferingTag && hasTemplateTag){
|
||||
hostsMatchingOfferingTag.retainAll(hostsMatchingTemplateTag);
|
||||
clusterHosts = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnTemplate);
|
||||
if (s_logger.isDebugEnabled()){
|
||||
clusterHosts = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnTemplate);
|
||||
if (s_logger.isDebugEnabled()){
|
||||
s_logger.debug("Found "+ hostsMatchingOfferingTag.size() +" Hosts satisfying both tags, host ids are:" + hostsMatchingOfferingTag);
|
||||
}
|
||||
|
||||
|
||||
clusterHosts = hostsMatchingOfferingTag;
|
||||
} else {
|
||||
if (hasSvcOfferingTag){
|
||||
|
|
@ -168,7 +168,14 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// add all hosts that we are not considering to the avoid list
|
||||
List<HostVO> allhostsInCluster = _hostDao.listAllUpAndEnabledNonHAHosts(type, clusterId, podId, dcId, null);
|
||||
allhostsInCluster.removeAll(clusterHosts);
|
||||
for (HostVO host : allhostsInCluster) {
|
||||
avoid.addHost(host.getId());
|
||||
}
|
||||
|
||||
return allocateTo(plan, offering, template, avoid, clusterHosts, returnUpTo, considerReservedCapacity, account);
|
||||
}
|
||||
|
||||
|
|
@ -226,11 +233,11 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
|
|||
}else if(_allocationAlgorithm.equals("userdispersing")){
|
||||
hosts = reorderHostsByNumberOfVms(plan, hosts, account);
|
||||
}
|
||||
|
||||
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("FirstFitAllocator has " + hosts.size() + " hosts to check for allocation: "+hosts);
|
||||
}
|
||||
|
||||
|
||||
// We will try to reorder the host lists such that we give priority to hosts that have
|
||||
// the minimums to support a VM's requirements
|
||||
hosts = prioritizeHosts(template, hosts);
|
||||
|
|
@ -242,7 +249,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
|
|||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Looking for speed=" + (offering.getCpu() * offering.getSpeed()) + "Mhz, Ram=" + offering.getRamSize());
|
||||
}
|
||||
|
||||
|
||||
List<Host> suitableHosts = new ArrayList<Host>();
|
||||
|
||||
for (HostVO host : hosts) {
|
||||
|
|
@ -255,7 +262,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
|
|||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
|
||||
//find number of guest VMs occupying capacity on this host.
|
||||
if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)){
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
|
|
@ -285,13 +292,14 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
|
|||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Not using host " + host.getId() + "; numCpusGood: " + numCpusGood + "; cpuFreqGood: " + cpuFreqGood + ", host has capacity?" + hostHasCapacity);
|
||||
}
|
||||
avoid.addHost(host.getId());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Host Allocator returning "+suitableHosts.size() +" suitable hosts");
|
||||
}
|
||||
|
||||
|
||||
return suitableHosts;
|
||||
}
|
||||
|
||||
|
|
@ -302,26 +310,26 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
|
|||
long dcId = plan.getDataCenterId();
|
||||
Long podId = plan.getPodId();
|
||||
Long clusterId = plan.getClusterId();
|
||||
|
||||
|
||||
List<Long> hostIdsByVmCount = _vmInstanceDao.listHostIdsByVmCount(dcId, podId, clusterId, account.getAccountId());
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("List of hosts in ascending order of number of VMs: "+ hostIdsByVmCount);
|
||||
}
|
||||
|
||||
|
||||
//now filter the given list of Hosts by this ordered list
|
||||
Map<Long, HostVO> hostMap = new HashMap<Long, HostVO>();
|
||||
Map<Long, HostVO> hostMap = new HashMap<Long, HostVO>();
|
||||
for (HostVO host : hosts) {
|
||||
hostMap.put(host.getId(), host);
|
||||
}
|
||||
List<Long> matchingHostIds = new ArrayList<Long>(hostMap.keySet());
|
||||
|
||||
|
||||
hostIdsByVmCount.retainAll(matchingHostIds);
|
||||
|
||||
|
||||
List<HostVO> reorderedHosts = new ArrayList<HostVO>();
|
||||
for(Long id: hostIdsByVmCount){
|
||||
reorderedHosts.add(hostMap.get(id));
|
||||
}
|
||||
|
||||
|
||||
return reorderedHosts;
|
||||
}
|
||||
|
||||
|
|
@ -336,13 +344,13 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
|
|||
if (template == null) {
|
||||
return hosts;
|
||||
}
|
||||
|
||||
|
||||
// Determine the guest OS category of the template
|
||||
String templateGuestOSCategory = getTemplateGuestOSCategory(template);
|
||||
|
||||
|
||||
List<HostVO> prioritizedHosts = new ArrayList<HostVO>();
|
||||
List<HostVO> noHvmHosts = new ArrayList<HostVO>();
|
||||
|
||||
|
||||
// If a template requires HVM and a host doesn't support HVM, remove it from consideration
|
||||
List<HostVO> hostsToCheck = new ArrayList<HostVO>();
|
||||
if (template.isRequiresHvm()) {
|
||||
|
|
@ -356,7 +364,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
|
|||
} else {
|
||||
hostsToCheck.addAll(hosts);
|
||||
}
|
||||
|
||||
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
if (noHvmHosts.size() > 0) {
|
||||
s_logger.debug("Not considering hosts: " + noHvmHosts + " to deploy template: " + template +" as they are not HVM enabled");
|
||||
|
|
@ -376,10 +384,10 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
|
|||
lowPriorityHosts.add(host);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
hostsToCheck.removeAll(highPriorityHosts);
|
||||
hostsToCheck.removeAll(lowPriorityHosts);
|
||||
|
||||
|
||||
// Prioritize the remaining hosts by HVM capability
|
||||
for (HostVO host : hostsToCheck) {
|
||||
if (!template.isRequiresHvm() && !hostSupportsHVM(host)) {
|
||||
|
|
@ -390,21 +398,21 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
|
|||
prioritizedHosts.add(host);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Merge the lists
|
||||
prioritizedHosts.addAll(0, highPriorityHosts);
|
||||
prioritizedHosts.addAll(lowPriorityHosts);
|
||||
|
||||
|
||||
return prioritizedHosts;
|
||||
}
|
||||
|
||||
|
||||
protected boolean hostSupportsHVM(HostVO host) {
|
||||
if ( !_checkHvm ) {
|
||||
return true;
|
||||
}
|
||||
// Determine host capabilities
|
||||
String caps = host.getCapabilities();
|
||||
|
||||
|
||||
if (caps != null) {
|
||||
String[] tokens = caps.split(",");
|
||||
for (String token : tokens) {
|
||||
|
|
@ -413,24 +421,24 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
protected String getHostGuestOSCategory(HostVO host) {
|
||||
DetailVO hostDetail = _hostDetailsDao.findDetail(host.getId(), "guest.os.category.id");
|
||||
if (hostDetail != null) {
|
||||
String guestOSCategoryIdString = hostDetail.getValue();
|
||||
long guestOSCategoryId;
|
||||
|
||||
|
||||
try {
|
||||
guestOSCategoryId = Long.parseLong(guestOSCategoryIdString);
|
||||
} catch (Exception e) {
|
||||
return null;
|
||||
}
|
||||
|
||||
|
||||
GuestOSCategoryVO guestOSCategory = _guestOSCategoryDao.findById(guestOSCategoryId);
|
||||
|
||||
|
||||
if (guestOSCategory != null) {
|
||||
return guestOSCategory.getName();
|
||||
} else {
|
||||
|
|
@ -440,7 +448,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
|
|||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
protected String getTemplateGuestOSCategory(VMTemplateVO template) {
|
||||
long guestOSId = template.getGuestOSId();
|
||||
GuestOSVO guestOS = _guestOSDao.findById(guestOSId);
|
||||
|
|
@ -455,7 +463,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
|
|||
Map<String, String> configs = _configDao.getConfiguration(params);
|
||||
String opFactor = configs.get("cpu.overprovisioning.factor");
|
||||
_factor = NumbersUtil.parseFloat(opFactor, 1);
|
||||
|
||||
|
||||
String allocationAlgorithm = configs.get("vm.allocation.algorithm");
|
||||
if (allocationAlgorithm != null) {
|
||||
_allocationAlgorithm = allocationAlgorithm;
|
||||
|
|
|
|||
|
|
@ -73,6 +73,7 @@ public class ServiceOfferingJoinDaoImpl extends GenericDaoBase<ServiceOfferingJo
|
|||
offeringResponse.setDomainId(offering.getDomainUuid());
|
||||
offeringResponse.setNetworkRate(offering.getRateMbps());
|
||||
offeringResponse.setHostTag(offering.getHostTag());
|
||||
offeringResponse.setDeploymentPlanner(offering.getDeploymentPlanner());
|
||||
offeringResponse.setObjectName("serviceoffering");
|
||||
|
||||
return offeringResponse;
|
||||
|
|
|
|||
|
|
@ -106,6 +106,9 @@ public class ServiceOfferingJoinVO extends BaseViewVO implements InternalIdentit
|
|||
@Column(name="domain_path")
|
||||
private String domainPath = null;
|
||||
|
||||
@Column(name = "deployment_planner")
|
||||
private String deploymentPlanner;
|
||||
|
||||
|
||||
public ServiceOfferingJoinVO() {
|
||||
}
|
||||
|
|
@ -307,5 +310,13 @@ public class ServiceOfferingJoinVO extends BaseViewVO implements InternalIdentit
|
|||
this.vm_type = vm_type;
|
||||
}
|
||||
|
||||
public String getDeploymentPlanner() {
|
||||
return deploymentPlanner;
|
||||
}
|
||||
|
||||
public void setDeploymentPlanner(String deploymentPlanner) {
|
||||
this.deploymentPlanner = deploymentPlanner;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -27,6 +27,8 @@ import javax.ejb.Local;
|
|||
import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import org.apache.cloudstack.framework.messagebus.MessageBus;
|
||||
import org.apache.cloudstack.framework.messagebus.PublishScope;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
|
@ -77,11 +79,14 @@ import com.cloud.utils.db.DB;
|
|||
import com.cloud.utils.db.SearchCriteria;
|
||||
import com.cloud.utils.db.Transaction;
|
||||
import com.cloud.utils.fsm.StateListener;
|
||||
import com.cloud.vm.UserVmDetailVO;
|
||||
import com.cloud.vm.UserVmVO;
|
||||
import com.cloud.vm.VMInstanceVO;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachine.Event;
|
||||
import com.cloud.vm.VirtualMachine.State;
|
||||
import com.cloud.vm.dao.UserVmDao;
|
||||
import com.cloud.vm.dao.UserVmDetailsDao;
|
||||
import com.cloud.vm.dao.VMInstanceDao;
|
||||
import com.cloud.vm.snapshot.VMSnapshot;
|
||||
import com.cloud.vm.snapshot.VMSnapshotVO;
|
||||
|
|
@ -121,6 +126,8 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
|
|||
protected VMSnapshotDao _vmSnapshotDao;
|
||||
@Inject
|
||||
protected UserVmDao _userVMDao;
|
||||
@Inject
|
||||
protected UserVmDetailsDao _userVmDetailsDao;
|
||||
|
||||
@Inject
|
||||
ClusterDetailsDao _clusterDetailsDao;
|
||||
|
|
@ -132,6 +139,11 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
|
|||
long _extraBytesPerVolume = 0;
|
||||
private float _storageOverProvisioningFactor = 1.0f;
|
||||
|
||||
@Inject
|
||||
MessageBus _messageBus;
|
||||
|
||||
private static final String MESSAGE_RESERVED_CAPACITY_FREED_FLAG = "Message.ReservedCapacityFreed.Flag";
|
||||
|
||||
@Override
|
||||
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
|
||||
_vmCapacityReleaseInterval = NumbersUtil.parseInt(_configDao.getValue(Config.CapacitySkipcountingHours.key()), 3600);
|
||||
|
|
@ -552,6 +564,20 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
|
|||
ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId());
|
||||
reservedMemory += so.getRamSize() * 1024L * 1024L;
|
||||
reservedCpu += so.getCpu() * so.getSpeed();
|
||||
} else {
|
||||
// signal if not done already, that the VM has been stopped for skip.counting.hours,
|
||||
// hence capacity will not be reserved anymore.
|
||||
UserVmDetailVO messageSentFlag = _userVmDetailsDao.findDetail(vm.getId(), MESSAGE_RESERVED_CAPACITY_FREED_FLAG);
|
||||
if (messageSentFlag == null || !Boolean.valueOf(messageSentFlag.getValue())) {
|
||||
_messageBus.publish(_name, "VM_ReservedCapacity_Free", PublishScope.LOCAL, vm);
|
||||
|
||||
if (vm.getType() == VirtualMachine.Type.User) {
|
||||
UserVmVO userVM = _userVMDao.findById(vm.getId());
|
||||
_userVMDao.loadDetails(userVM);
|
||||
userVM.setDetail(MESSAGE_RESERVED_CAPACITY_FREED_FLAG, "true");
|
||||
_userVMDao.saveDetails(userVM);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -688,6 +714,18 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
|
|||
allocateVmCapacity(vm, fromLastHost);
|
||||
}
|
||||
|
||||
if (newState == State.Stopped) {
|
||||
if (vm.getType() == VirtualMachine.Type.User) {
|
||||
|
||||
UserVmVO userVM = _userVMDao.findById(vm.getId());
|
||||
_userVMDao.loadDetails(userVM);
|
||||
// free the message sent flag if it exists
|
||||
userVM.setDetail(MESSAGE_RESERVED_CAPACITY_FREED_FLAG, "false");
|
||||
_userVMDao.saveDetails(userVM);
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -214,6 +214,8 @@ public enum Config {
|
|||
SecStorageProxy("Advanced", AgentManager.class, String.class, "secstorage.proxy", null, "http proxy used by ssvm, in http://username:password@proxyserver:port format", null),
|
||||
AlertPurgeInterval("Advanced", ManagementServer.class, Integer.class, "alert.purge.interval", "86400", "The interval (in seconds) to wait before running the alert purge thread", null),
|
||||
AlertPurgeDelay("Advanced", ManagementServer.class, Integer.class, "alert.purge.delay", "0", "Alerts older than specified number days will be purged. Set this value to 0 to never delete alerts", null),
|
||||
HostReservationReleasePeriod("Advanced", ManagementServer.class, Integer.class, "host.reservation.release.period", "300000", "The interval in milliseconds between host reservation release checks", null),
|
||||
|
||||
|
||||
// LB HealthCheck Interval.
|
||||
LBHealthCheck("Advanced", ManagementServer.class, String.class, "healthcheck.update.interval", "600",
|
||||
|
|
@ -235,6 +237,7 @@ public enum Config {
|
|||
ApplyAllocationAlgorithmToPods("Advanced", ManagementServer.class, Boolean.class, "apply.allocation.algorithm.to.pods", "false", "If true, deployment planner applies the allocation heuristics at pods first in the given datacenter during VM resource allocation", "true,false"),
|
||||
VmUserDispersionWeight("Advanced", ManagementServer.class, Float.class, "vm.user.dispersion.weight", "1", "Weight for user dispersion heuristic (as a value between 0 and 1) applied to resource allocation during vm deployment. Weight for capacity heuristic will be (1 - weight of user dispersion)", null),
|
||||
VmAllocationAlgorithm("Advanced", ManagementServer.class, String.class, "vm.allocation.algorithm", "random", "'random', 'firstfit', 'userdispersing', 'userconcentratedpod_random', 'userconcentratedpod_firstfit' : Order in which hosts within a cluster will be considered for VM/volume allocation.", null),
|
||||
VmDeploymentPlanner("Advanced", ManagementServer.class, String.class, "vm.deployment.planner", "FirstFitPlanner", "'FirstFitPlanner', 'UserDispersingPlanner', 'UserConcentratedPodPlanner': DeploymentPlanner heuristic that will be used for VM deployment.", null),
|
||||
EndpointeUrl("Advanced", ManagementServer.class, String.class, "endpointe.url", "http://localhost:8080/client/api", "Endpointe Url", null),
|
||||
ElasticLoadBalancerEnabled("Advanced", ManagementServer.class, String.class, "network.loadbalancer.basiczone.elb.enabled", "false", "Whether the load balancing service is enabled for basic zones", "true,false"),
|
||||
ElasticLoadBalancerNetwork("Advanced", ManagementServer.class, String.class, "network.loadbalancer.basiczone.elb.network", "guest", "Whether the elastic load balancing service public ips are taken from the public or guest network", "guest,public"),
|
||||
|
|
|
|||
|
|
@ -79,10 +79,12 @@ public interface ConfigurationManager extends ConfigurationService, Manager {
|
|||
* TODO
|
||||
* @param id
|
||||
* @param useVirtualNetwork
|
||||
* @param deploymentPlanner
|
||||
* @param details
|
||||
* @return ID
|
||||
*/
|
||||
ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, VirtualMachine.Type vm_typeType, String name, int cpu, int ramSize, int speed, String displayText, boolean localStorageRequired,
|
||||
boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate);
|
||||
boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate, String deploymentPlanner, Map<String, String> details);
|
||||
|
||||
/**
|
||||
* Creates a new disk offering
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@
|
|||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
|
|
@ -39,7 +39,6 @@ import javax.naming.NamingException;
|
|||
import javax.naming.directory.DirContext;
|
||||
import javax.naming.directory.InitialDirContext;
|
||||
|
||||
|
||||
import com.cloud.dc.*;
|
||||
import com.cloud.dc.dao.*;
|
||||
import com.cloud.user.*;
|
||||
|
|
@ -105,7 +104,6 @@ import com.cloud.dc.dao.DcDetailsDao;
|
|||
import com.cloud.dc.dao.HostPodDao;
|
||||
import com.cloud.dc.dao.PodVlanMapDao;
|
||||
import com.cloud.dc.dao.VlanDao;
|
||||
|
||||
import com.cloud.deploy.DataCenterDeployment;
|
||||
import com.cloud.domain.Domain;
|
||||
import com.cloud.domain.DomainVO;
|
||||
|
|
@ -162,8 +160,10 @@ import com.cloud.org.Grouping.AllocationState;
|
|||
import com.cloud.projects.Project;
|
||||
import com.cloud.projects.ProjectManager;
|
||||
import com.cloud.server.ConfigurationServer;
|
||||
import com.cloud.server.ManagementService;
|
||||
import com.cloud.service.ServiceOfferingVO;
|
||||
import com.cloud.service.dao.ServiceOfferingDao;
|
||||
import com.cloud.service.dao.ServiceOfferingDetailsDao;
|
||||
import com.cloud.storage.DiskOfferingVO;
|
||||
import com.cloud.storage.SwiftVO;
|
||||
import com.cloud.storage.dao.DiskOfferingDao;
|
||||
|
|
@ -276,6 +276,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
@Inject
|
||||
ServiceOfferingDao _serviceOfferingDao;
|
||||
@Inject
|
||||
ServiceOfferingDetailsDao _serviceOfferingDetailsDao;
|
||||
@Inject
|
||||
DiskOfferingDao _diskOfferingDao;
|
||||
@Inject
|
||||
NetworkOfferingDao _networkOfferingDao;
|
||||
|
|
@ -302,7 +304,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
@Inject
|
||||
AlertManager _alertMgr;
|
||||
// @com.cloud.utils.component.Inject(adapter = SecurityChecker.class)
|
||||
@Inject
|
||||
@Inject
|
||||
List<SecurityChecker> _secChecker;
|
||||
|
||||
@Inject
|
||||
|
|
@ -346,6 +348,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
@Inject
|
||||
NicIpAliasDao _nicIpAliasDao;
|
||||
|
||||
@Inject
|
||||
public ManagementService _mgr;
|
||||
|
||||
// FIXME - why don't we have interface for DataCenterLinkLocalIpAddressDao?
|
||||
@Inject protected DataCenterLinkLocalIpAddressDao _LinkLocalIpAllocDao;
|
||||
|
||||
|
|
@ -356,11 +361,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
@Override
|
||||
public boolean configure(final String name, final Map<String, Object> params) throws ConfigurationException {
|
||||
String maxVolumeSizeInGbString = _configDao.getValue(Config.MaxVolumeSize.key());
|
||||
_maxVolumeSizeInGb = NumbersUtil.parseInt(maxVolumeSizeInGbString,
|
||||
_maxVolumeSizeInGb = NumbersUtil.parseInt(maxVolumeSizeInGbString,
|
||||
Integer.parseInt(Config.MaxVolumeSize.getDefaultValue()));
|
||||
|
||||
String defaultPageSizeString = _configDao.getValue(Config.DefaultPageSize.key());
|
||||
_defaultPageSize = NumbersUtil.parseLong(defaultPageSizeString,
|
||||
_defaultPageSize = NumbersUtil.parseLong(defaultPageSizeString,
|
||||
Long.parseLong(Config.DefaultPageSize.getDefaultValue()));
|
||||
|
||||
populateConfigValuesForValidationSet();
|
||||
|
|
@ -920,7 +925,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
checkPodCidrSubnets(zoneId, podId, cidr);
|
||||
/*
|
||||
* Commenting out due to Bug 11593 - CIDR conflicts with zone when extending pod but not when creating it
|
||||
*
|
||||
*
|
||||
* checkCidrVlanOverlap(zoneId, cidr);
|
||||
*/
|
||||
}
|
||||
|
|
@ -1713,7 +1718,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
if (internalDns2 == null) {
|
||||
internalDns2 = zone.getInternalDns2();
|
||||
}
|
||||
|
||||
|
||||
if (guestCidr == null) {
|
||||
guestCidr = zone.getGuestNetworkCidr();
|
||||
}
|
||||
|
|
@ -2034,19 +2039,38 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
throw new InvalidParameterValueException("Network rate can be specified only for non-System offering and system offerings having \"domainrouter\" systemvmtype");
|
||||
}
|
||||
|
||||
return createServiceOffering(userId, cmd.getIsSystem(), vmType, cmd.getServiceOfferingName(), cpuNumber.intValue(), memory.intValue(), cpuSpeed.intValue(), cmd.getDisplayText(),
|
||||
localStorageRequired, offerHA, limitCpuUse, volatileVm, cmd.getTags(), cmd.getDomainId(), cmd.getHostTag(), cmd.getNetworkRate());
|
||||
if (cmd.getDeploymentPlanner() != null) {
|
||||
List<String> planners = _mgr.listDeploymentPlanners();
|
||||
if (planners != null && !planners.isEmpty()) {
|
||||
if (!planners.contains(cmd.getDeploymentPlanner())) {
|
||||
throw new InvalidParameterValueException(
|
||||
"Invalid name for Deployment Planner specified, please use listDeploymentPlanners to get the valid set");
|
||||
}
|
||||
} else {
|
||||
throw new InvalidParameterValueException("No deployment planners found");
|
||||
}
|
||||
}
|
||||
|
||||
return createServiceOffering(userId, cmd.getIsSystem(), vmType, cmd.getServiceOfferingName(),
|
||||
cpuNumber.intValue(), memory.intValue(), cpuSpeed.intValue(), cmd.getDisplayText(),
|
||||
localStorageRequired, offerHA, limitCpuUse, volatileVm, cmd.getTags(), cmd.getDomainId(),
|
||||
cmd.getHostTag(), cmd.getNetworkRate(), cmd.getDeploymentPlanner(), cmd.getDetails());
|
||||
}
|
||||
|
||||
@Override
|
||||
@ActionEvent(eventType = EventTypes.EVENT_SERVICE_OFFERING_CREATE, eventDescription = "creating service offering")
|
||||
public ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, VirtualMachine.Type vm_type, String name, int cpu, int ramSize, int speed, String displayText,
|
||||
boolean localStorageRequired, boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate) {
|
||||
public ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, VirtualMachine.Type vm_type,
|
||||
String name, int cpu, int ramSize, int speed, String displayText, boolean localStorageRequired,
|
||||
boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag,
|
||||
Integer networkRate, String deploymentPlanner, Map<String, String> details) {
|
||||
tags = cleanupTags(tags);
|
||||
ServiceOfferingVO offering = new ServiceOfferingVO(name, cpu, ramSize, speed, networkRate, null, offerHA, limitResourceUse, volatileVm, displayText, localStorageRequired, false, tags, isSystem, vm_type,
|
||||
domainId, hostTag);
|
||||
domainId, hostTag, deploymentPlanner);
|
||||
|
||||
if ((offering = _serviceOfferingDao.persist(offering)) != null) {
|
||||
if (details != null) {
|
||||
_serviceOfferingDetailsDao.persist(offering.getId(), details);
|
||||
}
|
||||
UserContext.current().setEventDetails("Service offering id=" + offering.getId());
|
||||
return offering;
|
||||
} else {
|
||||
|
|
@ -2328,9 +2352,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
String endIPv6 = cmd.getEndIpv6();
|
||||
String ip6Gateway = cmd.getIp6Gateway();
|
||||
String ip6Cidr = cmd.getIp6Cidr();
|
||||
|
||||
|
||||
Account vlanOwner = null;
|
||||
|
||||
|
||||
boolean ipv4 = (startIP != null);
|
||||
boolean ipv6 = (startIPv6 != null);
|
||||
|
||||
|
|
@ -2387,7 +2411,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
} else if (ipv6) {
|
||||
throw new InvalidParameterValueException("Only support IPv6 on extending existed network");
|
||||
}
|
||||
|
||||
|
||||
// Verify that zone exists
|
||||
DataCenterVO zone = _zoneDao.findById(zoneId);
|
||||
if (zone == null) {
|
||||
|
|
@ -2434,18 +2458,18 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
// Check if zone is enabled
|
||||
Account caller = UserContext.current().getCaller();
|
||||
if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getType())) {
|
||||
throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId);
|
||||
}
|
||||
}
|
||||
|
||||
if (zone.isSecurityGroupEnabled() && zone.getNetworkType() != DataCenter.NetworkType.Basic && forVirtualNetwork) {
|
||||
throw new InvalidParameterValueException("Can't add virtual ip range into a zone with security group enabled");
|
||||
}
|
||||
|
||||
|
||||
// If networkId is not specified, and vlan is Virtual or Direct Untagged, try to locate default networks
|
||||
if (forVirtualNetwork) {
|
||||
if (network == null) {
|
||||
|
|
@ -2604,35 +2628,35 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
String startIP, String endIP, String vlanGateway, String vlanNetmask,
|
||||
String vlanId, Account vlanOwner, String startIPv6, String endIPv6, String vlanIp6Gateway, String vlanIp6Cidr) {
|
||||
Network network = _networkModel.getNetwork(networkId);
|
||||
|
||||
|
||||
boolean ipv4 = false, ipv6 = false;
|
||||
|
||||
|
||||
if (startIP != null) {
|
||||
ipv4 = true;
|
||||
}
|
||||
|
||||
|
||||
if (startIPv6 != null) {
|
||||
ipv6 = true;
|
||||
}
|
||||
|
||||
|
||||
if (!ipv4 && !ipv6) {
|
||||
throw new InvalidParameterValueException("Please specify IPv4 or IPv6 address.");
|
||||
}
|
||||
|
||||
|
||||
//Validate the zone
|
||||
DataCenterVO zone = _zoneDao.findById(zoneId);
|
||||
if (zone == null) {
|
||||
throw new InvalidParameterValueException("Please specify a valid zone.");
|
||||
}
|
||||
|
||||
|
||||
// ACL check
|
||||
checkZoneAccess(UserContext.current().getCaller(), zone);
|
||||
|
||||
|
||||
//Validate the physical network
|
||||
if (_physicalNetworkDao.findById(physicalNetworkId) == null) {
|
||||
throw new InvalidParameterValueException("Please specify a valid physical network id");
|
||||
}
|
||||
|
||||
|
||||
//Validate the pod
|
||||
if (podId != null) {
|
||||
Pod pod = _podDao.findById(podId);
|
||||
|
|
@ -2644,11 +2668,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
}
|
||||
//pod vlans can be created in basic zone only
|
||||
if (zone.getNetworkType() != NetworkType.Basic || network.getTrafficType() != TrafficType.Guest) {
|
||||
throw new InvalidParameterValueException("Pod id can be specified only for the networks of type "
|
||||
+ TrafficType.Guest + " in zone of type " + NetworkType.Basic);
|
||||
throw new InvalidParameterValueException("Pod id can be specified only for the networks of type "
|
||||
+ TrafficType.Guest + " in zone of type " + NetworkType.Basic);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
//1) if vlan is specified for the guest network range, it should be the same as network's vlan
|
||||
//2) if vlan is missing, default it to the guest network's vlan
|
||||
if (network.getTrafficType() == TrafficType.Guest) {
|
||||
|
|
@ -2660,7 +2684,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
//For pvlan
|
||||
networkVlanId = networkVlanId.split("-")[0];
|
||||
}
|
||||
|
||||
|
||||
if (vlanId != null) {
|
||||
// if vlan is specified, throw an error if it's not equal to network's vlanId
|
||||
if (networkVlanId != null && !networkVlanId.equalsIgnoreCase(vlanId)) {
|
||||
|
|
@ -2673,14 +2697,14 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
//vlan id is required for public network
|
||||
throw new InvalidParameterValueException("Vlan id is required when add ip range to the public network");
|
||||
}
|
||||
|
||||
|
||||
if (vlanId == null) {
|
||||
vlanId = Vlan.UNTAGGED;
|
||||
}
|
||||
|
||||
VlanType vlanType = forVirtualNetwork ? VlanType.VirtualNetwork : VlanType.DirectAttached;
|
||||
|
||||
|
||||
|
||||
|
||||
if (vlanOwner != null && zone.getNetworkType() != NetworkType.Advanced) {
|
||||
throw new InvalidParameterValueException("Vlan owner can be defined only in the zone of type " + NetworkType.Advanced);
|
||||
}
|
||||
|
|
@ -2696,7 +2720,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
throw new InvalidParameterValueException("Please specify a valid netmask");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if (ipv6) {
|
||||
if (!NetUtils.isValidIpv6(vlanIp6Gateway)) {
|
||||
throw new InvalidParameterValueException("Please specify a valid IPv6 gateway");
|
||||
|
|
@ -2751,7 +2775,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
List<VlanVO> vlans = _vlanDao.listByZone(zone.getId());
|
||||
for (VlanVO vlan : vlans) {
|
||||
String otherVlanGateway = vlan.getVlanGateway();
|
||||
// Continue if it's not IPv4
|
||||
// Continue if it's not IPv4
|
||||
if (otherVlanGateway == null) {
|
||||
continue;
|
||||
}
|
||||
|
|
@ -2787,14 +2811,14 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
String ipv6Range = null;
|
||||
if (ipv6) {
|
||||
ipv6Range = startIPv6;
|
||||
if (endIPv6 != null) {
|
||||
ipv6Range += "-" + endIPv6;
|
||||
}
|
||||
|
||||
|
||||
List<VlanVO> vlans = _vlanDao.listByZone(zone.getId());
|
||||
for (VlanVO vlan : vlans) {
|
||||
if (vlan.getIp6Gateway() == null) {
|
||||
|
|
@ -2820,14 +2844,14 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
}
|
||||
|
||||
String ipRange = null;
|
||||
|
||||
|
||||
if (ipv4) {
|
||||
ipRange = startIP;
|
||||
if (endIP != null) {
|
||||
ipRange += "-" + endIP;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Everything was fine, so persist the VLAN
|
||||
Transaction txn = Transaction.currentTxn();
|
||||
txn.start();
|
||||
|
|
@ -2839,7 +2863,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
// IPv6 use a used ip map, is different from ipv4, no need to save public ip range
|
||||
if (ipv4) {
|
||||
if (!savePublicIPRange(startIP, endIP, zoneId, vlan.getId(), networkId, physicalNetworkId)) {
|
||||
throw new CloudRuntimeException("Failed to save IPv4 range. Please contact Cloud Support.");
|
||||
throw new CloudRuntimeException("Failed to save IPv4 range. Please contact Cloud Support.");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2875,7 +2899,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
if (vlanRange == null) {
|
||||
throw new InvalidParameterValueException("Please specify a valid IP range id.");
|
||||
}
|
||||
|
||||
|
||||
boolean isAccountSpecific = false;
|
||||
List<AccountVlanMapVO> acctVln = _accountVlanMapDao.listAccountVlanMapsByVlan(vlanRange.getId());
|
||||
// Check for account wide pool. It will have an entry for account_vlan_map.
|
||||
|
|
@ -2888,7 +2912,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
List<IPAddressVO> ips = _publicIpAddressDao.listByVlanId(vlanDbId);
|
||||
boolean success = true;
|
||||
if (allocIpCount > 0) {
|
||||
if (isAccountSpecific) {
|
||||
if (isAccountSpecific) {
|
||||
try {
|
||||
vlanRange = _vlanDao.acquireInLockTable(vlanDbId, 30);
|
||||
if (vlanRange == null) {
|
||||
|
|
@ -2901,7 +2925,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
|
||||
for (IPAddressVO ip : ips) {
|
||||
if (ip.isOneToOneNat()) {
|
||||
throw new InvalidParameterValueException("Can't delete account specific vlan " + vlanDbId +
|
||||
throw new InvalidParameterValueException("Can't delete account specific vlan " + vlanDbId +
|
||||
" as ip " + ip + " belonging to the range is used for static nat purposes. Cleanup the rules first");
|
||||
}
|
||||
|
||||
|
|
@ -2910,9 +2934,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
" as ip " + ip + " belonging to the range is a source nat ip for the network id=" + ip.getSourceNetworkId() +
|
||||
". IP range with the source nat ip address can be removed either as a part of Network, or account removal");
|
||||
}
|
||||
|
||||
|
||||
if (_firewallDao.countRulesByIpId(ip.getId()) > 0) {
|
||||
throw new InvalidParameterValueException("Can't delete account specific vlan " + vlanDbId +
|
||||
throw new InvalidParameterValueException("Can't delete account specific vlan " + vlanDbId +
|
||||
" as ip " + ip + " belonging to the range has firewall rules applied. Cleanup the rules first");
|
||||
}
|
||||
//release public ip address here
|
||||
|
|
@ -3268,7 +3292,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
|
||||
@DB
|
||||
protected boolean savePublicIPRange(String startIP, String endIP, long zoneId, long vlanDbId, long sourceNetworkid, long physicalNetworkId) {
|
||||
|
|
@ -3471,7 +3495,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
private boolean validPod(long podId) {
|
||||
return (_podDao.findById(podId) != null);
|
||||
}
|
||||
|
|
@ -3690,7 +3714,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
if (provider == Provider.JuniperSRX || provider == Provider.CiscoVnmc) {
|
||||
firewallProvider = provider;
|
||||
}
|
||||
|
||||
|
||||
if ((service == Service.PortForwarding || service == Service.StaticNat) && provider == Provider.VirtualRouter){
|
||||
firewallProvider = Provider.VirtualRouter;
|
||||
}
|
||||
|
|
@ -3890,7 +3914,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
if (!specifyVlan && type == GuestType.Shared) {
|
||||
throw new InvalidParameterValueException("SpecifyVlan should be true if network offering's type is " + type);
|
||||
}
|
||||
|
||||
|
||||
//specifyIpRanges should always be true for Shared networks
|
||||
//specifyIpRanges can only be true for Isolated networks with no Source Nat service
|
||||
if (specifyIpRanges) {
|
||||
|
|
@ -3914,7 +3938,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
if (availability == NetworkOffering.Availability.Required) {
|
||||
boolean canOffBeRequired = (type == GuestType.Isolated && serviceProviderMap.containsKey(Service.SourceNat));
|
||||
if (!canOffBeRequired) {
|
||||
throw new InvalidParameterValueException("Availability can be " + NetworkOffering.Availability.Required
|
||||
throw new InvalidParameterValueException("Availability can be " + NetworkOffering.Availability.Required
|
||||
+ " only for networkOfferings of type " + GuestType.Isolated + " and with "
|
||||
+ Service.SourceNat.getName() + " enabled");
|
||||
}
|
||||
|
|
@ -3922,11 +3946,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
// only one network offering in the system can be Required
|
||||
List<NetworkOfferingVO> offerings = _networkOfferingDao.listByAvailability(Availability.Required, false);
|
||||
if (!offerings.isEmpty()) {
|
||||
throw new InvalidParameterValueException("System already has network offering id=" + offerings.get(0).getId()
|
||||
throw new InvalidParameterValueException("System already has network offering id=" + offerings.get(0).getId()
|
||||
+ " with availability " + Availability.Required);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
boolean dedicatedLb = false;
|
||||
boolean elasticLb = false;
|
||||
boolean sharedSourceNat = false;
|
||||
|
|
@ -3938,7 +3962,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
boolean internalLb = false;
|
||||
if (serviceCapabilityMap != null && !serviceCapabilityMap.isEmpty()) {
|
||||
Map<Capability, String> lbServiceCapabilityMap = serviceCapabilityMap.get(Service.Lb);
|
||||
|
||||
|
||||
if ((lbServiceCapabilityMap != null) && (!lbServiceCapabilityMap.isEmpty())) {
|
||||
String isolationCapability = lbServiceCapabilityMap.get(Capability.SupportedLBIsolation);
|
||||
if (isolationCapability != null) {
|
||||
|
|
@ -3952,7 +3976,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
if (param != null) {
|
||||
elasticLb = param.contains("true");
|
||||
}
|
||||
|
||||
|
||||
String inlineMode = lbServiceCapabilityMap.get(Capability.InlineMode);
|
||||
if (inlineMode != null) {
|
||||
_networkModel.checkCapabilityForProvider(serviceProviderMap.get(Service.Lb), Service.Lb, Capability.InlineMode, inlineMode);
|
||||
|
|
@ -3983,14 +4007,14 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
if ((sourceNatServiceCapabilityMap != null) && (!sourceNatServiceCapabilityMap.isEmpty())) {
|
||||
String sourceNatType = sourceNatServiceCapabilityMap.get(Capability.SupportedSourceNatTypes);
|
||||
if (sourceNatType != null) {
|
||||
_networkModel.checkCapabilityForProvider(serviceProviderMap.get(Service.SourceNat), Service.SourceNat,
|
||||
_networkModel.checkCapabilityForProvider(serviceProviderMap.get(Service.SourceNat), Service.SourceNat,
|
||||
Capability.SupportedSourceNatTypes, sourceNatType);
|
||||
sharedSourceNat = sourceNatType.contains("perzone");
|
||||
}
|
||||
|
||||
String param = sourceNatServiceCapabilityMap.get(Capability.RedundantRouter);
|
||||
if (param != null) {
|
||||
_networkModel.checkCapabilityForProvider(serviceProviderMap.get(Service.SourceNat), Service.SourceNat,
|
||||
_networkModel.checkCapabilityForProvider(serviceProviderMap.get(Service.SourceNat), Service.SourceNat,
|
||||
Capability.RedundantRouter, param);
|
||||
redundantRouter = param.contains("true");
|
||||
}
|
||||
|
|
@ -4009,7 +4033,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
}
|
||||
}
|
||||
|
||||
NetworkOfferingVO offering = new NetworkOfferingVO(name, displayText, trafficType, systemOnly, specifyVlan,
|
||||
NetworkOfferingVO offering = new NetworkOfferingVO(name, displayText, trafficType, systemOnly, specifyVlan,
|
||||
networkRate, multicastRate, isDefault, availability, tags, type, conserveMode, dedicatedLb,
|
||||
sharedSourceNat, redundantRouter, elasticIp, elasticLb, specifyIpRanges, inline, isPersistent, associatePublicIp, publicLb, internalLb);
|
||||
|
||||
|
|
@ -4041,7 +4065,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
_ntwkOffServiceMapDao.persist(offService);
|
||||
s_logger.trace("Added service for the network offering: " + offService + " with provider " + provider.getName());
|
||||
}
|
||||
|
||||
|
||||
if (vpcOff) {
|
||||
List<Service> supportedSvcs = new ArrayList<Service>();
|
||||
supportedSvcs.addAll(serviceProviderMap.keySet());
|
||||
|
|
@ -4251,7 +4275,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
// filter by supported services
|
||||
boolean listBySupportedServices = (supportedServicesStr != null && !supportedServicesStr.isEmpty() && !offerings.isEmpty());
|
||||
boolean checkIfProvidersAreEnabled = (zoneId != null);
|
||||
boolean parseOfferings = (listBySupportedServices || sourceNatSupported != null || checkIfProvidersAreEnabled
|
||||
boolean parseOfferings = (listBySupportedServices || sourceNatSupported != null || checkIfProvidersAreEnabled
|
||||
|| forVpc != null || network != null);
|
||||
|
||||
if (parseOfferings) {
|
||||
|
|
@ -4299,7 +4323,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
if (sourceNatSupported != null) {
|
||||
addOffering = addOffering && (_networkModel.areServicesSupportedByNetworkOffering(offering.getId(), Network.Service.SourceNat) == sourceNatSupported);
|
||||
}
|
||||
|
||||
|
||||
if (forVpc != null) {
|
||||
addOffering = addOffering && (isOfferingForVpc(offering) == forVpc.booleanValue());
|
||||
} else if (network != null){
|
||||
|
|
@ -4418,14 +4442,14 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
}
|
||||
}
|
||||
if (availability == null) {
|
||||
throw new InvalidParameterValueException("Invalid value for Availability. Supported types: "
|
||||
throw new InvalidParameterValueException("Invalid value for Availability. Supported types: "
|
||||
+ Availability.Required + ", " + Availability.Optional);
|
||||
} else {
|
||||
if (availability == NetworkOffering.Availability.Required) {
|
||||
boolean canOffBeRequired = (offeringToUpdate.getGuestType() == GuestType.Isolated
|
||||
boolean canOffBeRequired = (offeringToUpdate.getGuestType() == GuestType.Isolated
|
||||
&& _networkModel.areServicesSupportedByNetworkOffering(offeringToUpdate.getId(), Service.SourceNat));
|
||||
if (!canOffBeRequired) {
|
||||
throw new InvalidParameterValueException("Availability can be " +
|
||||
throw new InvalidParameterValueException("Availability can be " +
|
||||
NetworkOffering.Availability.Required + " only for networkOfferings of type " + GuestType.Isolated + " and with "
|
||||
+ Service.SourceNat.getName() + " enabled");
|
||||
}
|
||||
|
|
@ -4433,7 +4457,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
// only one network offering in the system can be Required
|
||||
List<NetworkOfferingVO> offerings = _networkOfferingDao.listByAvailability(Availability.Required, false);
|
||||
if (!offerings.isEmpty() && offerings.get(0).getId() != offeringToUpdate.getId()) {
|
||||
throw new InvalidParameterValueException("System already has network offering id=" +
|
||||
throw new InvalidParameterValueException("System already has network offering id=" +
|
||||
offerings.get(0).getId() + " with availability " + Availability.Required);
|
||||
}
|
||||
}
|
||||
|
|
@ -4452,7 +4476,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
@ActionEvent(eventType = EventTypes.EVENT_ACCOUNT_MARK_DEFAULT_ZONE, eventDescription = "Marking account with the " +
|
||||
"default zone", async=true)
|
||||
public AccountVO markDefaultZone(String accountName, long domainId, long defaultZoneId) {
|
||||
|
||||
|
||||
// Check if the account exists
|
||||
Account account = _accountDao.findEnabledAccount(accountName, domainId);
|
||||
if (account == null) {
|
||||
|
|
@ -4466,9 +4490,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
}
|
||||
|
||||
AccountVO acctForUpdate = _accountDao.findById(account.getId());
|
||||
|
||||
|
||||
acctForUpdate.setDefaultZoneId(defaultZoneId);
|
||||
|
||||
|
||||
if (_accountDao.update(account.getId(), acctForUpdate)) {
|
||||
UserContext.current().setEventDetails("Default zone id= " + defaultZoneId);
|
||||
return _accountDao.findById(account.getId());
|
||||
|
|
@ -4476,7 +4500,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Note: This method will be used for entity name validations in the coming
|
||||
// releases (place holder for now)
|
||||
private void validateEntityName(String str) {
|
||||
|
|
@ -4604,10 +4628,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
public ClusterVO getCluster(long id) {
|
||||
return _clusterDao.findById(id);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public AllocationState findClusterAllocationState(ClusterVO cluster){
|
||||
|
||||
|
||||
if(cluster.getAllocationState() == AllocationState.Disabled){
|
||||
return AllocationState.Disabled;
|
||||
}else if(ApiDBUtils.findPodById(cluster.getPodId()).getAllocationState() == AllocationState.Disabled){
|
||||
|
|
@ -4615,20 +4639,20 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
}else {
|
||||
DataCenterVO zone = ApiDBUtils.findZoneById(cluster.getDataCenterId());
|
||||
return zone.getAllocationState();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public AllocationState findPodAllocationState(HostPodVO pod){
|
||||
|
||||
|
||||
if(pod.getAllocationState() == AllocationState.Disabled){
|
||||
return AllocationState.Disabled;
|
||||
}else {
|
||||
DataCenterVO zone = ApiDBUtils.findZoneById(pod.getDataCenterId());
|
||||
return zone.getAllocationState();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private boolean allowIpRangeOverlap(VlanVO vlan, boolean forVirtualNetwork, long networkId) {
|
||||
// FIXME - delete restriction for virtual network in the future
|
||||
if (vlan.getVlanType() == VlanType.DirectAttached && !forVirtualNetwork) {
|
||||
|
|
|
|||
|
|
@ -1,84 +0,0 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.deploy;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import com.cloud.configuration.Config;
|
||||
import com.cloud.configuration.dao.ConfigurationDao;
|
||||
import com.cloud.utils.component.AdapterBase;
|
||||
import com.cloud.vm.UserVmVO;
|
||||
|
||||
public abstract class AbstractDeployPlannerSelector extends AdapterBase implements DeployPlannerSelector {
|
||||
protected Map<String, Object> params;
|
||||
protected String name;
|
||||
protected int runLevel;
|
||||
|
||||
@Inject
|
||||
protected ConfigurationDao _configDao;
|
||||
protected String _allocationAlgorithm = "random";
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setConfigParams(Map<String, Object> params) {
|
||||
this.params = params;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, Object> getConfigParams() {
|
||||
return params;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getRunLevel() {
|
||||
return runLevel;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setRunLevel(int level) {
|
||||
this.runLevel = level;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
|
||||
super.configure(name, params);
|
||||
_allocationAlgorithm = _configDao.getValue(Config.VmAllocationAlgorithm.key());
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean start() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean stop() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -49,6 +49,7 @@ import com.cloud.dc.Pod;
|
|||
import com.cloud.dc.dao.ClusterDao;
|
||||
import com.cloud.dc.dao.DataCenterDao;
|
||||
import com.cloud.dc.dao.HostPodDao;
|
||||
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
|
||||
import com.cloud.exception.InsufficientServerCapacityException;
|
||||
import com.cloud.host.Host;
|
||||
import com.cloud.host.HostVO;
|
||||
|
|
@ -81,7 +82,7 @@ import com.cloud.vm.dao.UserVmDao;
|
|||
import com.cloud.vm.dao.VMInstanceDao;
|
||||
|
||||
@Local(value=DeploymentPlanner.class)
|
||||
public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
||||
public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPlanner {
|
||||
private static final Logger s_logger = Logger.getLogger(FirstFitPlanner.class);
|
||||
@Inject protected HostDao _hostDao;
|
||||
@Inject protected DataCenterDao _dcDao;
|
||||
|
|
@ -103,28 +104,13 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
@Inject DataStoreManager dataStoreMgr;
|
||||
@Inject protected ClusterDetailsDao _clusterDetailsDao;
|
||||
|
||||
protected List<StoragePoolAllocator> _storagePoolAllocators;
|
||||
public List<StoragePoolAllocator> getStoragePoolAllocators() {
|
||||
return _storagePoolAllocators;
|
||||
}
|
||||
public void setStoragePoolAllocators(
|
||||
List<StoragePoolAllocator> _storagePoolAllocators) {
|
||||
this._storagePoolAllocators = _storagePoolAllocators;
|
||||
}
|
||||
|
||||
protected List<HostAllocator> _hostAllocators;
|
||||
public List<HostAllocator> getHostAllocators() {
|
||||
return _hostAllocators;
|
||||
}
|
||||
public void setHostAllocators(List<HostAllocator> _hostAllocators) {
|
||||
this._hostAllocators = _hostAllocators;
|
||||
}
|
||||
|
||||
protected String _allocationAlgorithm = "random";
|
||||
protected String _globalDeploymentPlanner = "FirstFitPlanner";
|
||||
|
||||
|
||||
@Override
|
||||
public DeployDestination plan(VirtualMachineProfile<? extends VirtualMachine> vmProfile,
|
||||
public List<Long> orderClusters(VirtualMachineProfile<? extends VirtualMachine> vmProfile,
|
||||
DeploymentPlan plan, ExcludeList avoid)
|
||||
throws InsufficientServerCapacityException {
|
||||
VirtualMachine vm = vmProfile.getVirtualMachine();
|
||||
|
|
@ -138,136 +124,19 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
return null;
|
||||
}
|
||||
|
||||
ServiceOffering offering = vmProfile.getServiceOffering();
|
||||
int cpu_requested = offering.getCpu() * offering.getSpeed();
|
||||
long ram_requested = offering.getRamSize() * 1024L * 1024L;
|
||||
|
||||
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("DeploymentPlanner allocation algorithm: "+_allocationAlgorithm);
|
||||
|
||||
s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + plan.getClusterId() +
|
||||
", requested cpu: " + cpu_requested + ", requested ram: " + ram_requested);
|
||||
|
||||
s_logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId()!=null ? "Yes": "No"));
|
||||
}
|
||||
|
||||
String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag);
|
||||
|
||||
if(plan.getHostId() != null && haVmTag == null){
|
||||
Long hostIdSpecified = plan.getHostId();
|
||||
if (s_logger.isDebugEnabled()){
|
||||
s_logger.debug("DeploymentPlan has host_id specified, choosing this host and making no checks on this host: "
|
||||
+ hostIdSpecified);
|
||||
}
|
||||
HostVO host = _hostDao.findById(hostIdSpecified);
|
||||
if (host == null) {
|
||||
s_logger.debug("The specified host cannot be found");
|
||||
} else if (avoid.shouldAvoid(host)) {
|
||||
s_logger.debug("The specified host is in avoid set");
|
||||
} else {
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Looking for suitable pools for this host under zone: "+host.getDataCenterId() +", pod: "+ host.getPodId()+", cluster: "+ host.getClusterId());
|
||||
}
|
||||
|
||||
// search for storage under the zone, pod, cluster of the host.
|
||||
DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(),
|
||||
host.getClusterId(), hostIdSpecified, plan.getPoolId(), null, plan.getReservationContext());
|
||||
|
||||
Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile,
|
||||
lastPlan, avoid, HostAllocator.RETURN_UPTO_ALL);
|
||||
Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
|
||||
List<Volume> readyAndReusedVolumes = result.second();
|
||||
|
||||
// choose the potential pool for this VM for this host
|
||||
if (!suitableVolumeStoragePools.isEmpty()) {
|
||||
List<Host> suitableHosts = new ArrayList<Host>();
|
||||
suitableHosts.add(host);
|
||||
|
||||
Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
|
||||
suitableHosts, suitableVolumeStoragePools);
|
||||
if (potentialResources != null) {
|
||||
Pod pod = _podDao.findById(host.getPodId());
|
||||
Cluster cluster = _clusterDao.findById(host.getClusterId());
|
||||
Map<Volume, StoragePool> storageVolMap = potentialResources.second();
|
||||
// remove the reused vol<->pool from destination, since
|
||||
// we don't have to prepare this volume.
|
||||
for (Volume vol : readyAndReusedVolumes) {
|
||||
storageVolMap.remove(vol);
|
||||
}
|
||||
DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap);
|
||||
s_logger.debug("Returning Deployment Destination: " + dest);
|
||||
return dest;
|
||||
}
|
||||
}
|
||||
}
|
||||
s_logger.debug("Cannnot deploy to specified host, returning.");
|
||||
return null;
|
||||
}
|
||||
|
||||
if (vm.getLastHostId() != null && haVmTag == null) {
|
||||
s_logger.debug("This VM has last host_id specified, trying to choose the same host: " +vm.getLastHostId());
|
||||
|
||||
HostVO host = _hostDao.findById(vm.getLastHostId());
|
||||
if(host == null){
|
||||
s_logger.debug("The last host of this VM cannot be found");
|
||||
}else if(avoid.shouldAvoid(host)){
|
||||
s_logger.debug("The last host of this VM is in avoid set");
|
||||
}else if(_capacityMgr.checkIfHostReachMaxGuestLimit(host)){
|
||||
s_logger.debug("The last Host, hostId: "+ host.getId() +" already has max Running VMs(count includes system VMs), skipping this and trying other available hosts");
|
||||
}else{
|
||||
if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) {
|
||||
long cluster_id = host.getClusterId();
|
||||
ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id,"cpuOvercommitRatio");
|
||||
ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id,"memoryOvercommitRatio");
|
||||
Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
|
||||
Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
|
||||
if(_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true)){
|
||||
s_logger.debug("The last host of this VM is UP and has enough capacity");
|
||||
s_logger.debug("Now checking for suitable pools under zone: "+host.getDataCenterId() +", pod: "+ host.getPodId()+", cluster: "+ host.getClusterId());
|
||||
//search for storage under the zone, pod, cluster of the last host.
|
||||
DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null);
|
||||
Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoid, HostAllocator.RETURN_UPTO_ALL);
|
||||
Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
|
||||
List<Volume> readyAndReusedVolumes = result.second();
|
||||
//choose the potential pool for this VM for this host
|
||||
if(!suitableVolumeStoragePools.isEmpty()){
|
||||
List<Host> suitableHosts = new ArrayList<Host>();
|
||||
suitableHosts.add(host);
|
||||
|
||||
Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools);
|
||||
if(potentialResources != null){
|
||||
Pod pod = _podDao.findById(host.getPodId());
|
||||
Cluster cluster = _clusterDao.findById(host.getClusterId());
|
||||
Map<Volume, StoragePool> storageVolMap = potentialResources.second();
|
||||
// remove the reused vol<->pool from destination, since we don't have to prepare this volume.
|
||||
for(Volume vol : readyAndReusedVolumes){
|
||||
storageVolMap.remove(vol);
|
||||
}
|
||||
DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap);
|
||||
s_logger.debug("Returning Deployment Destination: "+ dest);
|
||||
return dest;
|
||||
}
|
||||
}
|
||||
}else{
|
||||
s_logger.debug("The last host of this VM does not have enough capacity");
|
||||
}
|
||||
}else{
|
||||
s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: "+host.getStatus().name() + ", host resource state is: "+host.getResourceState());
|
||||
}
|
||||
}
|
||||
s_logger.debug("Cannot choose the last host to deploy this VM ");
|
||||
}
|
||||
|
||||
|
||||
List<Long> clusterList = new ArrayList<Long>();
|
||||
if (plan.getClusterId() != null) {
|
||||
Long clusterIdSpecified = plan.getClusterId();
|
||||
s_logger.debug("Searching resources only under specified Cluster: "+ clusterIdSpecified);
|
||||
ClusterVO cluster = _clusterDao.findById(plan.getClusterId());
|
||||
if (cluster != null ){
|
||||
clusterList.add(clusterIdSpecified);
|
||||
return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc);
|
||||
if (avoid.shouldAvoid(cluster)) {
|
||||
s_logger.debug("The specified cluster is in avoid set, returning.");
|
||||
} else {
|
||||
clusterList.add(clusterIdSpecified);
|
||||
removeClustersCrossingThreshold(clusterList, avoid, vmProfile, plan);
|
||||
}
|
||||
return clusterList;
|
||||
}else{
|
||||
s_logger.debug("The specified cluster cannot be found, returning.");
|
||||
avoid.addCluster(plan.getClusterId());
|
||||
|
|
@ -280,11 +149,15 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
|
||||
HostPodVO pod = _podDao.findById(podIdSpecified);
|
||||
if (pod != null) {
|
||||
DeployDestination dest = scanClustersForDestinationInZoneOrPod(podIdSpecified, false, vmProfile, plan, avoid);
|
||||
if(dest == null){
|
||||
avoid.addPod(plan.getPodId());
|
||||
if (avoid.shouldAvoid(pod)) {
|
||||
s_logger.debug("The specified pod is in avoid set, returning.");
|
||||
} else {
|
||||
clusterList = scanClustersForDestinationInZoneOrPod(podIdSpecified, false, vmProfile, plan, avoid);
|
||||
if (clusterList == null) {
|
||||
avoid.addPod(plan.getPodId());
|
||||
}
|
||||
}
|
||||
return dest;
|
||||
return clusterList;
|
||||
} else {
|
||||
s_logger.debug("The specified Pod cannot be found, returning.");
|
||||
avoid.addPod(plan.getPodId());
|
||||
|
|
@ -305,7 +178,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
|
||||
}
|
||||
|
||||
private DeployDestination scanPodsForDestination(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid){
|
||||
private List<Long> scanPodsForDestination(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid){
|
||||
|
||||
ServiceOffering offering = vmProfile.getServiceOffering();
|
||||
int requiredCpu = offering.getCpu() * offering.getSpeed();
|
||||
|
|
@ -341,20 +214,24 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
if(!podsWithCapacity.isEmpty()){
|
||||
|
||||
prioritizedPodIds = reorderPods(podCapacityInfo, vmProfile, plan);
|
||||
if (prioritizedPodIds == null || prioritizedPodIds.isEmpty()) {
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("No Pods found for destination, returning.");
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
List<Long> clusterList = new ArrayList<Long>();
|
||||
//loop over pods
|
||||
for(Long podId : prioritizedPodIds){
|
||||
s_logger.debug("Checking resources under Pod: "+podId);
|
||||
DeployDestination dest = scanClustersForDestinationInZoneOrPod(podId, false, vmProfile, plan, avoid);
|
||||
if(dest != null){
|
||||
return dest;
|
||||
List<Long> clustersUnderPod = scanClustersForDestinationInZoneOrPod(podId, false, vmProfile, plan,
|
||||
avoid);
|
||||
if (clustersUnderPod != null) {
|
||||
clusterList.addAll(clustersUnderPod);
|
||||
}
|
||||
avoid.addPod(podId);
|
||||
}
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("No Pods found for destination, returning.");
|
||||
}
|
||||
return null;
|
||||
return clusterList;
|
||||
}else{
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("No Pods found after removing disabled pods and pods in avoid list, returning.");
|
||||
|
|
@ -363,7 +240,69 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
}
|
||||
}
|
||||
|
||||
private DeployDestination scanClustersForDestinationInZoneOrPod(long id, boolean isZone, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid){
|
||||
private Map<Short, Float> getCapacityThresholdMap() {
|
||||
// Lets build this real time so that the admin wont have to restart MS
|
||||
// if he changes these values
|
||||
Map<Short, Float> disableThresholdMap = new HashMap<Short, Float>();
|
||||
|
||||
String cpuDisableThresholdString = _configDao.getValue(Config.CPUCapacityDisableThreshold.key());
|
||||
float cpuDisableThreshold = NumbersUtil.parseFloat(cpuDisableThresholdString, 0.85F);
|
||||
disableThresholdMap.put(Capacity.CAPACITY_TYPE_CPU, cpuDisableThreshold);
|
||||
|
||||
String memoryDisableThresholdString = _configDao.getValue(Config.MemoryCapacityDisableThreshold.key());
|
||||
float memoryDisableThreshold = NumbersUtil.parseFloat(memoryDisableThresholdString, 0.85F);
|
||||
disableThresholdMap.put(Capacity.CAPACITY_TYPE_MEMORY, memoryDisableThreshold);
|
||||
|
||||
return disableThresholdMap;
|
||||
}
|
||||
|
||||
private List<Short> getCapacitiesForCheckingThreshold() {
|
||||
List<Short> capacityList = new ArrayList<Short>();
|
||||
capacityList.add(Capacity.CAPACITY_TYPE_CPU);
|
||||
capacityList.add(Capacity.CAPACITY_TYPE_MEMORY);
|
||||
return capacityList;
|
||||
}
|
||||
|
||||
private void removeClustersCrossingThreshold(List<Long> clusterListForVmAllocation, ExcludeList avoid,
|
||||
VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan) {
|
||||
|
||||
List<Short> capacityList = getCapacitiesForCheckingThreshold();
|
||||
List<Long> clustersCrossingThreshold = new ArrayList<Long>();
|
||||
|
||||
ServiceOffering offering = vmProfile.getServiceOffering();
|
||||
int cpu_requested = offering.getCpu() * offering.getSpeed();
|
||||
long ram_requested = offering.getRamSize() * 1024L * 1024L;
|
||||
|
||||
// For each capacity get the cluster list crossing the threshold and
|
||||
// remove it from the clusterList that will be used for vm allocation.
|
||||
for (short capacity : capacityList) {
|
||||
|
||||
if (clusterListForVmAllocation == null || clusterListForVmAllocation.size() == 0) {
|
||||
return;
|
||||
}
|
||||
if (capacity == Capacity.CAPACITY_TYPE_CPU) {
|
||||
clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity,
|
||||
plan.getDataCenterId(), Config.CPUCapacityDisableThreshold.key(), cpu_requested);
|
||||
} else if (capacity == Capacity.CAPACITY_TYPE_MEMORY) {
|
||||
clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity,
|
||||
plan.getDataCenterId(), Config.MemoryCapacityDisableThreshold.key(), ram_requested);
|
||||
}
|
||||
|
||||
if (clustersCrossingThreshold != null && clustersCrossingThreshold.size() != 0) {
|
||||
// addToAvoid Set
|
||||
avoid.addClusterList(clustersCrossingThreshold);
|
||||
// Remove clusters crossing disabled threshold
|
||||
clusterListForVmAllocation.removeAll(clustersCrossingThreshold);
|
||||
|
||||
s_logger.debug("Cannot allocate cluster list " + clustersCrossingThreshold.toString() + " for vm creation since their allocated percentage" +
|
||||
" crosses the disable capacity threshold defined at each cluster/ at global value for capacity Type : " + capacity + ", skipping these clusters");
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
private List<Long> scanClustersForDestinationInZoneOrPod(long id, boolean isZone,
|
||||
VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid) {
|
||||
|
||||
VirtualMachine vm = vmProfile.getVirtualMachine();
|
||||
ServiceOffering offering = vmProfile.getServiceOffering();
|
||||
|
|
@ -396,6 +335,9 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
prioritizedClusterIds.removeAll(disabledClusters);
|
||||
}
|
||||
}
|
||||
|
||||
removeClustersCrossingThreshold(prioritizedClusterIds, avoid, vmProfile, plan);
|
||||
|
||||
}else{
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("No clusters found having a host with enough capacity, returning.");
|
||||
|
|
@ -404,7 +346,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
}
|
||||
if(!prioritizedClusterIds.isEmpty()){
|
||||
List<Long> clusterList = reorderClusters(id, isZone, clusterCapacityInfo, vmProfile, plan);
|
||||
return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc);
|
||||
return clusterList; //return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc);
|
||||
}else{
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("No clusters found after removing disabled clusters and clusters in avoid list, returning.");
|
||||
|
|
@ -452,114 +394,6 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
return disabledPods;
|
||||
}
|
||||
|
||||
private List<Short> getCapacitiesForCheckingThreshold(){
|
||||
List<Short> capacityList = new ArrayList<Short>();
|
||||
capacityList.add(Capacity.CAPACITY_TYPE_CPU);
|
||||
capacityList.add(Capacity.CAPACITY_TYPE_MEMORY);
|
||||
return capacityList;
|
||||
}
|
||||
|
||||
private void removeClustersCrossingThreshold(List<Long> clusterListForVmAllocation, ExcludeList avoid, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan){
|
||||
|
||||
List<Short> capacityList = getCapacitiesForCheckingThreshold();
|
||||
List<Long> clustersCrossingThreshold = new ArrayList<Long>();
|
||||
|
||||
ServiceOffering offering = vmProfile.getServiceOffering();
|
||||
int cpu_requested = offering.getCpu() * offering.getSpeed();
|
||||
long ram_requested = offering.getRamSize() * 1024L * 1024L;
|
||||
|
||||
// For each capacity get the cluster list crossing the threshold and remove it from the clusterList that will be used for vm allocation.
|
||||
for(short capacity : capacityList){
|
||||
|
||||
if (clusterListForVmAllocation == null || clusterListForVmAllocation.size() == 0){
|
||||
return;
|
||||
}
|
||||
if (capacity == Capacity.CAPACITY_TYPE_CPU) {
|
||||
clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, plan.getDataCenterId(), Config.CPUCapacityDisableThreshold.key(), cpu_requested);
|
||||
}
|
||||
else if (capacity == Capacity.CAPACITY_TYPE_MEMORY ) {
|
||||
clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, plan.getDataCenterId(),
|
||||
Config.MemoryCapacityDisableThreshold.key(), ram_requested );
|
||||
}
|
||||
|
||||
|
||||
if (clustersCrossingThreshold != null && clustersCrossingThreshold.size() != 0){
|
||||
// addToAvoid Set
|
||||
avoid.addClusterList(clustersCrossingThreshold);
|
||||
// Remove clusters crossing disabled threshold
|
||||
clusterListForVmAllocation.removeAll(clustersCrossingThreshold);
|
||||
|
||||
s_logger.debug("Cannot allocate cluster list " + clustersCrossingThreshold.toString() + " for vm creation since their allocated percentage" +
|
||||
" crosses the disable capacity threshold defined at each cluster/ at global value for capacity Type : " + capacity + ", skipping these clusters");
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
private DeployDestination checkClustersforDestination(List<Long> clusterList, VirtualMachineProfile<? extends VirtualMachine> vmProfile,
|
||||
DeploymentPlan plan, ExcludeList avoid, DataCenter dc){
|
||||
|
||||
if (s_logger.isTraceEnabled()) {
|
||||
s_logger.trace("ClusterId List to consider: " + clusterList);
|
||||
}
|
||||
|
||||
removeClustersCrossingThreshold(clusterList, avoid, vmProfile, plan);
|
||||
|
||||
for(Long clusterId : clusterList){
|
||||
Cluster clusterVO = _clusterDao.findById(clusterId);
|
||||
|
||||
if (clusterVO.getHypervisorType() != vmProfile.getHypervisorType()) {
|
||||
s_logger.debug("Cluster: "+clusterId + " has HyperVisorType that does not match the VM, skipping this cluster");
|
||||
avoid.addCluster(clusterVO.getId());
|
||||
continue;
|
||||
}
|
||||
|
||||
s_logger.debug("Checking resources in Cluster: "+clusterId + " under Pod: "+clusterVO.getPodId());
|
||||
//search for resources(hosts and storage) under this zone, pod, cluster.
|
||||
DataCenterDeployment potentialPlan = new DataCenterDeployment(plan.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null, plan.getPoolId(), null, plan.getReservationContext());
|
||||
|
||||
//find suitable hosts under this cluster, need as many hosts as we get.
|
||||
List<Host> suitableHosts = findSuitableHosts(vmProfile, potentialPlan, avoid, HostAllocator.RETURN_UPTO_ALL);
|
||||
//if found suitable hosts in this cluster, find suitable storage pools for each volume of the VM
|
||||
if(suitableHosts != null && !suitableHosts.isEmpty()){
|
||||
if (vmProfile.getHypervisorType() == HypervisorType.BareMetal) {
|
||||
Pod pod = _podDao.findById(clusterVO.getPodId());
|
||||
DeployDestination dest = new DeployDestination(dc, pod, clusterVO, suitableHosts.get(0));
|
||||
return dest;
|
||||
}
|
||||
|
||||
Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, potentialPlan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
|
||||
Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
|
||||
List<Volume> readyAndReusedVolumes = result.second();
|
||||
|
||||
//choose the potential host and pool for the VM
|
||||
if(!suitableVolumeStoragePools.isEmpty()){
|
||||
Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools);
|
||||
|
||||
if(potentialResources != null){
|
||||
Pod pod = _podDao.findById(clusterVO.getPodId());
|
||||
Host host = _hostDao.findById(potentialResources.first().getId());
|
||||
Map<Volume, StoragePool> storageVolMap = potentialResources.second();
|
||||
// remove the reused vol<->pool from destination, since we don't have to prepare this volume.
|
||||
for(Volume vol : readyAndReusedVolumes){
|
||||
storageVolMap.remove(vol);
|
||||
}
|
||||
DeployDestination dest = new DeployDestination(dc, pod, clusterVO, host, storageVolMap );
|
||||
s_logger.debug("Returning Deployment Destination: "+ dest);
|
||||
return dest;
|
||||
}
|
||||
}else{
|
||||
s_logger.debug("No suitable storagePools found under this Cluster: "+clusterId);
|
||||
}
|
||||
}else{
|
||||
s_logger.debug("No suitable hosts found under this Cluster: "+clusterId);
|
||||
}
|
||||
avoid.addCluster(clusterVO.getId());
|
||||
}
|
||||
s_logger.debug("Could not find suitable Deployment Destination for this VM under any clusters, returning. ");
|
||||
return null;
|
||||
}
|
||||
|
||||
|
||||
protected Pair<List<Long>, Map<Long, Double>> listClustersByCapacity(long id, int requiredCpu, long requiredRam, ExcludeList avoid, boolean isZone){
|
||||
//look at the aggregate available cpu and ram per cluster
|
||||
|
|
@ -630,215 +464,6 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
|
||||
}
|
||||
|
||||
|
||||
protected Pair<Host, Map<Volume, StoragePool>> findPotentialDeploymentResources(List<Host> suitableHosts, Map<Volume, List<StoragePool>> suitableVolumeStoragePools){
|
||||
s_logger.debug("Trying to find a potenial host and associated storage pools from the suitable host/pool lists for this VM");
|
||||
|
||||
boolean hostCanAccessPool = false;
|
||||
boolean haveEnoughSpace = false;
|
||||
Map<Volume, StoragePool> storage = new HashMap<Volume, StoragePool>();
|
||||
TreeSet<Volume> volumesOrderBySizeDesc = new TreeSet<Volume>(new Comparator<Volume>() {
|
||||
@Override
|
||||
public int compare(Volume v1, Volume v2) {
|
||||
if(v1.getSize() < v2.getSize())
|
||||
return 1;
|
||||
else
|
||||
return -1;
|
||||
}
|
||||
});
|
||||
volumesOrderBySizeDesc.addAll(suitableVolumeStoragePools.keySet());
|
||||
boolean multipleVolume = volumesOrderBySizeDesc.size() > 1;
|
||||
for(Host potentialHost : suitableHosts){
|
||||
Map<StoragePool,List<Volume>> volumeAllocationMap = new HashMap<StoragePool,List<Volume>>();
|
||||
for(Volume vol : volumesOrderBySizeDesc){
|
||||
haveEnoughSpace = false;
|
||||
s_logger.debug("Checking if host: "+potentialHost.getId() +" can access any suitable storage pool for volume: "+ vol.getVolumeType());
|
||||
List<StoragePool> volumePoolList = suitableVolumeStoragePools.get(vol);
|
||||
hostCanAccessPool = false;
|
||||
for(StoragePool potentialSPool : volumePoolList){
|
||||
if(hostCanAccessSPool(potentialHost, potentialSPool)){
|
||||
hostCanAccessPool = true;
|
||||
if(multipleVolume){
|
||||
List<Volume> requestVolumes = null;
|
||||
if(volumeAllocationMap.containsKey(potentialSPool))
|
||||
requestVolumes = volumeAllocationMap.get(potentialSPool);
|
||||
else
|
||||
requestVolumes = new ArrayList<Volume>();
|
||||
requestVolumes.add(vol);
|
||||
|
||||
if(!_storageMgr.storagePoolHasEnoughSpace(requestVolumes, potentialSPool))
|
||||
continue;
|
||||
volumeAllocationMap.put(potentialSPool,requestVolumes);
|
||||
}
|
||||
storage.put(vol, potentialSPool);
|
||||
haveEnoughSpace = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if(!hostCanAccessPool){
|
||||
break;
|
||||
}
|
||||
if(!haveEnoughSpace) {
|
||||
s_logger.warn("insufficient capacity to allocate all volumes");
|
||||
break;
|
||||
}
|
||||
}
|
||||
if(hostCanAccessPool && haveEnoughSpace){
|
||||
s_logger.debug("Found a potential host " + "id: "+potentialHost.getId() + " name: " +potentialHost.getName() + " and associated storage pools for this VM");
|
||||
return new Pair<Host, Map<Volume, StoragePool>>(potentialHost, storage);
|
||||
}
|
||||
}
|
||||
s_logger.debug("Could not find a potential host that has associated storage pools from the suitable host/pool lists for this VM");
|
||||
return null;
|
||||
}
|
||||
|
||||
protected boolean hostCanAccessSPool(Host host, StoragePool pool){
|
||||
boolean hostCanAccessSPool = false;
|
||||
|
||||
StoragePoolHostVO hostPoolLinkage = _poolHostDao.findByPoolHost(pool.getId(), host.getId());
|
||||
if(hostPoolLinkage != null){
|
||||
hostCanAccessSPool = true;
|
||||
}
|
||||
|
||||
s_logger.debug("Host: "+ host.getId() + (hostCanAccessSPool ?" can" : " cannot") + " access pool: "+ pool.getId());
|
||||
return hostCanAccessSPool;
|
||||
}
|
||||
|
||||
protected List<Host> findSuitableHosts(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo){
|
||||
List<Host> suitableHosts = new ArrayList<Host>();
|
||||
for(HostAllocator allocator : _hostAllocators) {
|
||||
suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, avoid, returnUpTo);
|
||||
if (suitableHosts != null && !suitableHosts.isEmpty()) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if(suitableHosts.isEmpty()){
|
||||
s_logger.debug("No suitable hosts found");
|
||||
}
|
||||
return suitableHosts;
|
||||
}
|
||||
|
||||
protected Pair<Map<Volume, List<StoragePool>>, List<Volume>> findSuitablePoolsForVolumes(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo){
|
||||
List<VolumeVO> volumesTobeCreated = _volsDao.findUsableVolumesForInstance(vmProfile.getId());
|
||||
Map<Volume, List<StoragePool>> suitableVolumeStoragePools = new HashMap<Volume, List<StoragePool>>();
|
||||
List<Volume> readyAndReusedVolumes = new ArrayList<Volume>();
|
||||
|
||||
//for each volume find list of suitable storage pools by calling the allocators
|
||||
for (VolumeVO toBeCreated : volumesTobeCreated) {
|
||||
s_logger.debug("Checking suitable pools for volume (Id, Type): ("+toBeCreated.getId() +"," +toBeCreated.getVolumeType().name() + ")");
|
||||
|
||||
//If the plan specifies a poolId, it means that this VM's ROOT volume is ready and the pool should be reused.
|
||||
//In this case, also check if rest of the volumes are ready and can be reused.
|
||||
if(plan.getPoolId() != null){
|
||||
s_logger.debug("Volume has pool(" + plan.getPoolId() + ") already allocated, checking if pool can be reused, poolId: "+toBeCreated.getPoolId());
|
||||
List<StoragePool> suitablePools = new ArrayList<StoragePool>();
|
||||
StoragePool pool = null;
|
||||
if(toBeCreated.getPoolId() != null){
|
||||
s_logger.debug("finding pool by id '" + toBeCreated.getPoolId() + "'");
|
||||
pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(toBeCreated.getPoolId());
|
||||
}else{
|
||||
s_logger.debug("finding pool by id '" + plan.getPoolId() + "'");
|
||||
pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(plan.getPoolId());
|
||||
}
|
||||
|
||||
if(pool != null){
|
||||
if(!pool.isInMaintenance()){
|
||||
if(!avoid.shouldAvoid(pool)){
|
||||
long exstPoolDcId = pool.getDataCenterId();
|
||||
|
||||
long exstPoolPodId = pool.getPodId() != null ? pool.getPodId() : -1;
|
||||
long exstPoolClusterId = pool.getClusterId() != null ? pool.getClusterId() : -1;
|
||||
if(plan.getDataCenterId() == exstPoolDcId && plan.getPodId() == exstPoolPodId && plan.getClusterId() == exstPoolClusterId){
|
||||
s_logger.debug("Planner need not allocate a pool for this volume since its READY");
|
||||
suitablePools.add(pool);
|
||||
suitableVolumeStoragePools.put(toBeCreated, suitablePools);
|
||||
if (!(toBeCreated.getState() == Volume.State.Allocated || toBeCreated.getState() == Volume.State.Creating)) {
|
||||
readyAndReusedVolumes.add(toBeCreated);
|
||||
}
|
||||
continue;
|
||||
}else{
|
||||
s_logger.debug("Pool of the volume does not fit the specified plan, need to reallocate a pool for this volume");
|
||||
}
|
||||
}else{
|
||||
s_logger.debug("Pool of the volume is in avoid set, need to reallocate a pool for this volume");
|
||||
}
|
||||
}else{
|
||||
s_logger.debug("Pool of the volume is in maintenance, need to reallocate a pool for this volume");
|
||||
}
|
||||
}else{
|
||||
s_logger.debug("Unable to find pool by provided id");
|
||||
}
|
||||
}
|
||||
|
||||
if(s_logger.isDebugEnabled()){
|
||||
s_logger.debug("We need to allocate new storagepool for this volume");
|
||||
}
|
||||
if(!isRootAdmin(plan.getReservationContext())){
|
||||
if(!isEnabledForAllocation(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId())){
|
||||
if(s_logger.isDebugEnabled()){
|
||||
s_logger.debug("Cannot allocate new storagepool for this volume in this cluster, allocation state is disabled");
|
||||
s_logger.debug("Cannot deploy to this specified plan, allocation state is disabled, returning.");
|
||||
}
|
||||
//Cannot find suitable storage pools under this cluster for this volume since allocation_state is disabled.
|
||||
//- remove any suitable pools found for other volumes.
|
||||
//All volumes should get suitable pools under this cluster; else we cant use this cluster.
|
||||
suitableVolumeStoragePools.clear();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
s_logger.debug("Calling StoragePoolAllocators to find suitable pools");
|
||||
|
||||
DiskOfferingVO diskOffering = _diskOfferingDao.findById(toBeCreated.getDiskOfferingId());
|
||||
DiskProfile diskProfile = new DiskProfile(toBeCreated, diskOffering, vmProfile.getHypervisorType());
|
||||
|
||||
boolean useLocalStorage = false;
|
||||
if (vmProfile.getType() != VirtualMachine.Type.User) {
|
||||
String ssvmUseLocalStorage = _configDao.getValue(Config.SystemVMUseLocalStorage.key());
|
||||
if (ssvmUseLocalStorage.equalsIgnoreCase("true")) {
|
||||
useLocalStorage = true;
|
||||
}
|
||||
} else {
|
||||
useLocalStorage = diskOffering.getUseLocalStorage();
|
||||
|
||||
// TODO: this is a hacking fix for the problem of deploy ISO-based VM on local storage
|
||||
// when deploying VM based on ISO, we have a service offering and an additional disk offering, use-local storage flag is actually
|
||||
// saved in service offering, overrde the flag from service offering when it is a ROOT disk
|
||||
if(!useLocalStorage && vmProfile.getServiceOffering().getUseLocalStorage()) {
|
||||
if(toBeCreated.getVolumeType() == Volume.Type.ROOT)
|
||||
useLocalStorage = true;
|
||||
}
|
||||
}
|
||||
diskProfile.setUseLocalStorage(useLocalStorage);
|
||||
|
||||
boolean foundPotentialPools = false;
|
||||
for(StoragePoolAllocator allocator : _storagePoolAllocators) {
|
||||
final List<StoragePool> suitablePools = allocator.allocateToPool(diskProfile, vmProfile, plan, avoid, returnUpTo);
|
||||
if (suitablePools != null && !suitablePools.isEmpty()) {
|
||||
suitableVolumeStoragePools.put(toBeCreated, suitablePools);
|
||||
foundPotentialPools = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if(!foundPotentialPools){
|
||||
s_logger.debug("No suitable pools found for volume: "+toBeCreated +" under cluster: "+plan.getClusterId());
|
||||
//No suitable storage pools found under this cluster for this volume. - remove any suitable pools found for other volumes.
|
||||
//All volumes should get suitable pools under this cluster; else we cant use this cluster.
|
||||
suitableVolumeStoragePools.clear();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if(suitableVolumeStoragePools.isEmpty()){
|
||||
s_logger.debug("No suitable pools found");
|
||||
}
|
||||
|
||||
return new Pair<Map<Volume, List<StoragePool>>, List<Volume>>(suitableVolumeStoragePools, readyAndReusedVolumes);
|
||||
}
|
||||
|
||||
|
||||
private boolean isRootAdmin(ReservationContext reservationContext) {
|
||||
if(reservationContext != null){
|
||||
if(reservationContext.getAccount() != null){
|
||||
|
|
@ -859,10 +484,17 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
|
||||
@Override
|
||||
public boolean canHandle(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan, ExcludeList avoid) {
|
||||
if(vm.getHypervisorType() != HypervisorType.BareMetal){
|
||||
//check the allocation strategy
|
||||
if (_allocationAlgorithm != null && (_allocationAlgorithm.equals(AllocationAlgorithm.random.toString()) || _allocationAlgorithm.equals(AllocationAlgorithm.firstfit.toString()))) {
|
||||
return true;
|
||||
// check what the ServiceOffering says. If null, check the global config
|
||||
ServiceOffering offering = vm.getServiceOffering();
|
||||
if (vm.getHypervisorType() != HypervisorType.BareMetal) {
|
||||
if (offering != null && offering.getDeploymentPlanner() != null) {
|
||||
if (offering.getDeploymentPlanner().equals(this.getName())) {
|
||||
return true;
|
||||
}
|
||||
} else {
|
||||
if (_globalDeploymentPlanner != null && _globalDeploymentPlanner.equals(this._name)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
|
|
@ -872,29 +504,20 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
|
||||
super.configure(name, params);
|
||||
_allocationAlgorithm = _configDao.getValue(Config.VmAllocationAlgorithm.key());
|
||||
_globalDeploymentPlanner = _configDao.getValue(Config.VmDeploymentPlanner.key());
|
||||
return true;
|
||||
}
|
||||
|
||||
private boolean isEnabledForAllocation(long zoneId, Long podId, Long clusterId){
|
||||
// Check if the zone exists in the system
|
||||
DataCenterVO zone = _dcDao.findById(zoneId);
|
||||
if(zone != null && Grouping.AllocationState.Disabled == zone.getAllocationState()){
|
||||
s_logger.info("Zone is currently disabled, cannot allocate to this zone: "+ zoneId);
|
||||
return false;
|
||||
}
|
||||
|
||||
Pod pod = _podDao.findById(podId);
|
||||
if(pod != null && Grouping.AllocationState.Disabled == pod.getAllocationState()){
|
||||
s_logger.info("Pod is currently disabled, cannot allocate to this pod: "+ podId);
|
||||
return false;
|
||||
}
|
||||
@Override
|
||||
public DeployDestination plan(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan,
|
||||
ExcludeList avoid) throws InsufficientServerCapacityException {
|
||||
// TODO Auto-generated method stub
|
||||
return null;
|
||||
}
|
||||
|
||||
Cluster cluster = _clusterDao.findById(clusterId);
|
||||
if(cluster != null && Grouping.AllocationState.Disabled == cluster.getAllocationState()){
|
||||
s_logger.info("Cluster is currently disabled, cannot allocate to this cluster: "+ clusterId);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
@Override
|
||||
public PlannerResourceUsage getResourceUsage() {
|
||||
return PlannerResourceUsage.Shared;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,54 +0,0 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.deploy;
|
||||
|
||||
import javax.ejb.Local;
|
||||
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.deploy.DeploymentPlanner.AllocationAlgorithm;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.vm.UserVmVO;
|
||||
|
||||
@Local(value = {DeployPlannerSelector.class})
|
||||
public class HypervisorVmPlannerSelector extends AbstractDeployPlannerSelector {
|
||||
private static final Logger s_logger = Logger.getLogger(HypervisorVmPlannerSelector.class);
|
||||
|
||||
@Override
|
||||
public String selectPlanner(UserVmVO vm) {
|
||||
if (vm.getHypervisorType() != HypervisorType.BareMetal) {
|
||||
//check the allocation strategy
|
||||
if (_allocationAlgorithm != null) {
|
||||
if (_allocationAlgorithm.equals(AllocationAlgorithm.random.toString())
|
||||
|| _allocationAlgorithm.equals(AllocationAlgorithm.firstfit.toString())) {
|
||||
return "FirstFitPlanner";
|
||||
} else if (_allocationAlgorithm.equals(AllocationAlgorithm.userdispersing.toString())) {
|
||||
return "UserDispersingPlanner";
|
||||
} else if (_allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_random.toString())
|
||||
|| _allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_firstfit.toString())) {
|
||||
return "UserConcentratedPodPlanner";
|
||||
}
|
||||
} else {
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("The allocation algorithm is null, cannot select the planner");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,117 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.deploy;
|
||||
|
||||
|
||||
import javax.persistence.Column;
|
||||
import javax.persistence.Entity;
|
||||
import javax.persistence.EnumType;
|
||||
import javax.persistence.Enumerated;
|
||||
import javax.persistence.GeneratedValue;
|
||||
import javax.persistence.GenerationType;
|
||||
import javax.persistence.Id;
|
||||
import javax.persistence.Table;
|
||||
import org.apache.cloudstack.api.InternalIdentity;
|
||||
|
||||
import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage;
|
||||
|
||||
@Entity
|
||||
@Table(name = "op_host_planner_reservation")
|
||||
public class PlannerHostReservationVO implements InternalIdentity {
|
||||
@Id
|
||||
@GeneratedValue(strategy=GenerationType.IDENTITY)
|
||||
@Column(name="id")
|
||||
private long id;
|
||||
|
||||
@Column(name="host_id")
|
||||
private Long hostId;
|
||||
|
||||
@Column(name="data_center_id")
|
||||
private Long dataCenterId;
|
||||
|
||||
@Column(name="pod_id")
|
||||
private Long podId;
|
||||
|
||||
@Column(name="cluster_id")
|
||||
private Long clusterId;
|
||||
|
||||
@Column(name = "resource_usage")
|
||||
@Enumerated(EnumType.STRING)
|
||||
private PlannerResourceUsage resourceUsage;
|
||||
|
||||
public PlannerHostReservationVO() {
|
||||
}
|
||||
|
||||
public PlannerHostReservationVO(Long hostId, Long dataCenterId, Long podId, Long clusterId) {
|
||||
this.hostId = hostId;
|
||||
this.dataCenterId = dataCenterId;
|
||||
this.podId = podId;
|
||||
this.clusterId = clusterId;
|
||||
}
|
||||
|
||||
public PlannerHostReservationVO(Long hostId, Long dataCenterId, Long podId, Long clusterId,
|
||||
PlannerResourceUsage resourceUsage) {
|
||||
this.hostId = hostId;
|
||||
this.dataCenterId = dataCenterId;
|
||||
this.podId = podId;
|
||||
this.clusterId = clusterId;
|
||||
this.resourceUsage = resourceUsage;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
public Long getHostId() {
|
||||
return hostId;
|
||||
}
|
||||
|
||||
public void setHostId(Long hostId) {
|
||||
this.hostId = hostId;
|
||||
}
|
||||
|
||||
public Long getDataCenterId() {
|
||||
return dataCenterId;
|
||||
}
|
||||
public void setDataCenterId(Long dataCenterId) {
|
||||
this.dataCenterId = dataCenterId;
|
||||
}
|
||||
|
||||
public Long getPodId() {
|
||||
return podId;
|
||||
}
|
||||
public void setPodId(long podId) {
|
||||
this.podId = new Long(podId);
|
||||
}
|
||||
|
||||
public Long getClusterId() {
|
||||
return clusterId;
|
||||
}
|
||||
public void setClusterId(long clusterId) {
|
||||
this.clusterId = new Long(clusterId);
|
||||
}
|
||||
|
||||
public PlannerResourceUsage getResourceUsage() {
|
||||
return resourceUsage;
|
||||
}
|
||||
|
||||
public void setResourceUsage(PlannerResourceUsage resourceType) {
|
||||
this.resourceUsage = resourceType;
|
||||
}
|
||||
|
||||
}
|
||||
54
server/src/com/cloud/deploy/DeployPlannerSelector.java → server/src/com/cloud/deploy/dao/PlannerHostReservationDao.java
Executable file → Normal file
54
server/src/com/cloud/deploy/DeployPlannerSelector.java → server/src/com/cloud/deploy/dao/PlannerHostReservationDao.java
Executable file → Normal file
|
|
@ -1,24 +1,30 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.deploy;
|
||||
|
||||
import com.cloud.utils.component.Adapter;
|
||||
import com.cloud.vm.UserVmVO;
|
||||
|
||||
public interface DeployPlannerSelector extends Adapter {
    /**
     * Selects which deployment planner should place the given user VM.
     *
     * @param vm the user VM about to be deployed
     * @return the name of the deployment planner to use for this VM
     */
    String selectPlanner(UserVmVO vm);
}
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.deploy.dao;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import com.cloud.deploy.PlannerHostReservationVO;
|
||||
import com.cloud.utils.db.GenericDao;
|
||||
|
||||
public interface PlannerHostReservationDao extends GenericDao<PlannerHostReservationVO, Long> {

    /**
     * Finds the planner reservation entry for a host.
     *
     * @param hostId id of the host to look up
     * @return the reservation row for that host, or null if the host has no
     *         entry in op_host_planner_reservation
     */
    PlannerHostReservationVO findByHostId(long hostId);

    /**
     * Lists every reservation row whose resource usage is set (non-null),
     * i.e. all hosts currently reserved by a deployment planner.
     */
    List<PlannerHostReservationVO> listAllReservedHosts();

}
|
||||
|
|
@ -0,0 +1,63 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.deploy.dao;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import javax.annotation.PostConstruct;
|
||||
import javax.ejb.Local;
|
||||
import com.cloud.deploy.PlannerHostReservationVO;
|
||||
import com.cloud.utils.db.GenericDaoBase;
|
||||
import com.cloud.utils.db.SearchBuilder;
|
||||
import com.cloud.utils.db.SearchCriteria;
|
||||
|
||||
@Local(value = { PlannerHostReservationDao.class })
|
||||
public class PlannerHostReservationDaoImpl extends GenericDaoBase<PlannerHostReservationVO, Long> implements
|
||||
PlannerHostReservationDao {
|
||||
|
||||
private SearchBuilder<PlannerHostReservationVO> _hostIdSearch;
|
||||
private SearchBuilder<PlannerHostReservationVO> _reservedHostSearch;
|
||||
|
||||
public PlannerHostReservationDaoImpl() {
|
||||
|
||||
}
|
||||
|
||||
@PostConstruct
|
||||
protected void init() {
|
||||
_hostIdSearch = createSearchBuilder();
|
||||
_hostIdSearch.and("hostId", _hostIdSearch.entity().getHostId(), SearchCriteria.Op.EQ);
|
||||
_hostIdSearch.done();
|
||||
|
||||
_reservedHostSearch = createSearchBuilder();
|
||||
_reservedHostSearch.and("usage", _reservedHostSearch.entity().getResourceUsage(), SearchCriteria.Op.NNULL);
|
||||
_reservedHostSearch.done();
|
||||
}
|
||||
|
||||
@Override
|
||||
public PlannerHostReservationVO findByHostId(long hostId) {
|
||||
SearchCriteria<PlannerHostReservationVO> sc = _hostIdSearch.create();
|
||||
sc.setParameters("hostId", hostId);
|
||||
return findOneBy(sc);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<PlannerHostReservationVO> listAllReservedHosts() {
|
||||
SearchCriteria<PlannerHostReservationVO> sc = _reservedHostSearch.create();
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -284,6 +284,10 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L
|
|||
Long guestNetworkId, boolean sourceNat, boolean assign, String requestedIp, boolean isSystem, Long vpcId)
|
||||
throws InsufficientAddressCapacityException {
|
||||
StringBuilder errorMessage = new StringBuilder("Unable to get ip adress in ");
|
||||
boolean fetchFromDedicatedRange = false;
|
||||
List<Long> dedicatedVlanDbIds = new ArrayList<Long>();
|
||||
List<Long> nonDedicatedVlanDbIds = new ArrayList<Long>();
|
||||
|
||||
Transaction txn = Transaction.currentTxn();
|
||||
txn.start();
|
||||
SearchCriteria<IPAddressVO> sc = null;
|
||||
|
|
@ -296,9 +300,37 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L
|
|||
errorMessage.append(" zone id=" + dcId);
|
||||
}
|
||||
|
||||
if ( vlanDbIds != null && !vlanDbIds.isEmpty() ) {
|
||||
sc.setParameters("vlanId", vlanDbIds.toArray());
|
||||
errorMessage.append(", vlanId id=" + vlanDbIds.toArray());
|
||||
// If owner has dedicated Public IP ranges, fetch IP from the dedicated range
|
||||
// Otherwise fetch IP from the system pool
|
||||
List<AccountVlanMapVO> maps = _accountVlanMapDao.listAccountVlanMapsByAccount(owner.getId());
|
||||
for (AccountVlanMapVO map : maps) {
|
||||
if (vlanDbIds == null || vlanDbIds.contains(map.getVlanDbId()))
|
||||
dedicatedVlanDbIds.add(map.getVlanDbId());
|
||||
}
|
||||
List<VlanVO> nonDedicatedVlans = _vlanDao.listZoneWideNonDedicatedVlans(dcId);
|
||||
for (VlanVO nonDedicatedVlan : nonDedicatedVlans) {
|
||||
if (vlanDbIds == null || vlanDbIds.contains(nonDedicatedVlan.getId()))
|
||||
nonDedicatedVlanDbIds.add(nonDedicatedVlan.getId());
|
||||
}
|
||||
if (dedicatedVlanDbIds != null && !dedicatedVlanDbIds.isEmpty()) {
|
||||
fetchFromDedicatedRange = true;
|
||||
sc.setParameters("vlanId", dedicatedVlanDbIds.toArray());
|
||||
errorMessage.append(", vlanId id=" + dedicatedVlanDbIds.toArray());
|
||||
} else if (nonDedicatedVlanDbIds != null && !nonDedicatedVlanDbIds.isEmpty()) {
|
||||
sc.setParameters("vlanId", nonDedicatedVlanDbIds.toArray());
|
||||
errorMessage.append(", vlanId id=" + nonDedicatedVlanDbIds.toArray());
|
||||
} else {
|
||||
if (podId != null) {
|
||||
InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException
|
||||
("Insufficient address capacity", Pod.class, podId);
|
||||
ex.addProxyObject(ApiDBUtils.findPodById(podId).getUuid());
|
||||
throw ex;
|
||||
}
|
||||
s_logger.warn(errorMessage.toString());
|
||||
InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException
|
||||
("Insufficient address capacity", DataCenter.class, dcId);
|
||||
ex.addProxyObject(ApiDBUtils.findZoneById(dcId).getUuid());
|
||||
throw ex;
|
||||
}
|
||||
|
||||
sc.setParameters("dc", dcId);
|
||||
|
|
@ -321,6 +353,16 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L
|
|||
|
||||
List<IPAddressVO> addrs = _ipAddressDao.lockRows(sc, filter, true);
|
||||
|
||||
// If all the dedicated IPs of the owner are in use fetch an IP from the system pool
|
||||
if (addrs.size() == 0 && fetchFromDedicatedRange) {
|
||||
if (nonDedicatedVlanDbIds != null && !nonDedicatedVlanDbIds.isEmpty()) {
|
||||
fetchFromDedicatedRange = false;
|
||||
sc.setParameters("vlanId", nonDedicatedVlanDbIds.toArray());
|
||||
errorMessage.append(", vlanId id=" + nonDedicatedVlanDbIds.toArray());
|
||||
addrs = _ipAddressDao.lockRows(sc, filter, true);
|
||||
}
|
||||
}
|
||||
|
||||
if (addrs.size() == 0) {
|
||||
if (podId != null) {
|
||||
InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException
|
||||
|
|
@ -338,6 +380,16 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L
|
|||
|
||||
assert (addrs.size() == 1) : "Return size is incorrect: " + addrs.size();
|
||||
|
||||
if (!fetchFromDedicatedRange) {
|
||||
// Check that the maximum number of public IPs for the given accountId will not be exceeded
|
||||
try {
|
||||
_resourceLimitMgr.checkResourceLimit(owner, ResourceType.public_ip);
|
||||
} catch (ResourceAllocationException ex) {
|
||||
s_logger.warn("Failed to allocate resource of type " + ex.getResourceType() + " for account " + owner);
|
||||
throw new AccountLimitException("Maximum number of public IP addresses for account: " + owner.getAccountName() + " has been exceeded.");
|
||||
}
|
||||
}
|
||||
|
||||
IPAddressVO addr = addrs.get(0);
|
||||
addr.setSourceNat(sourceNat);
|
||||
addr.setAllocatedTime(new Date());
|
||||
|
|
@ -442,14 +494,6 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L
|
|||
|
||||
long ownerId = owner.getId();
|
||||
|
||||
// Check that the maximum number of public IPs for the given accountId will not be exceeded
|
||||
try {
|
||||
_resourceLimitMgr.checkResourceLimit(owner, ResourceType.public_ip);
|
||||
} catch (ResourceAllocationException ex) {
|
||||
s_logger.warn("Failed to allocate resource of type " + ex.getResourceType() + " for account " + owner);
|
||||
throw new AccountLimitException("Maximum number of public IP addresses for account: " + owner.getAccountName() + " has been exceeded.");
|
||||
}
|
||||
|
||||
PublicIp ip = null;
|
||||
Transaction txn = Transaction.currentTxn();
|
||||
try {
|
||||
|
|
@ -466,15 +510,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L
|
|||
s_logger.debug("lock account " + ownerId + " is acquired");
|
||||
}
|
||||
|
||||
// If account has Account specific ip ranges, try to allocate ip from there
|
||||
List<Long> vlanIds = new ArrayList<Long>();
|
||||
List<AccountVlanMapVO> maps = _accountVlanMapDao.listAccountVlanMapsByAccount(ownerId);
|
||||
if (maps != null && !maps.isEmpty()) {
|
||||
vlanIds.add(maps.get(0).getVlanDbId());
|
||||
}
|
||||
|
||||
|
||||
ip = fetchNewPublicIp(dcId, null, vlanIds, owner, VlanType.VirtualNetwork, guestNtwkId,
|
||||
ip = fetchNewPublicIp(dcId, null, null, owner, VlanType.VirtualNetwork, guestNtwkId,
|
||||
isSourceNat, false, null, false, vpcId);
|
||||
IPAddressVO publicIp = ip.ip();
|
||||
|
||||
|
|
@ -610,9 +646,6 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L
|
|||
|
||||
VlanType vlanType = VlanType.VirtualNetwork;
|
||||
boolean assign = false;
|
||||
boolean allocateFromDedicatedRange = false;
|
||||
List<Long> dedicatedVlanDbIds = new ArrayList<Long>();
|
||||
List<Long> nonDedicatedVlanDbIds = new ArrayList<Long>();
|
||||
|
||||
if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getType())) {
|
||||
// zone is of type DataCenter. See DataCenterVO.java.
|
||||
|
|
@ -642,39 +675,8 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L
|
|||
|
||||
txn.start();
|
||||
|
||||
// If account has dedicated Public IP ranges, allocate IP from the dedicated range
|
||||
List<AccountVlanMapVO> maps = _accountVlanMapDao.listAccountVlanMapsByAccount(ipOwner.getId());
|
||||
for (AccountVlanMapVO map : maps) {
|
||||
dedicatedVlanDbIds.add(map.getVlanDbId());
|
||||
}
|
||||
if (dedicatedVlanDbIds != null && !dedicatedVlanDbIds.isEmpty()) {
|
||||
allocateFromDedicatedRange = true;
|
||||
}
|
||||
|
||||
try {
|
||||
if (allocateFromDedicatedRange) {
|
||||
ip = fetchNewPublicIp(zone.getId(), null, dedicatedVlanDbIds, ipOwner, vlanType, null,
|
||||
false, assign, null, isSystem, null);
|
||||
}
|
||||
} catch(InsufficientAddressCapacityException e) {
|
||||
s_logger.warn("All IPs dedicated to account " + ipOwner.getId() + " has been acquired." +
|
||||
" Now acquiring from the system pool");
|
||||
txn.close();
|
||||
allocateFromDedicatedRange = false;
|
||||
}
|
||||
|
||||
if (!allocateFromDedicatedRange) {
|
||||
// Check that the maximum number of public IPs for the given
|
||||
// accountId will not be exceeded
|
||||
_resourceLimitMgr.checkResourceLimit(accountToLock, ResourceType.public_ip);
|
||||
|
||||
List<VlanVO> nonDedicatedVlans = _vlanDao.listZoneWideNonDedicatedVlans(zone.getId());
|
||||
for (VlanVO nonDedicatedVlan : nonDedicatedVlans) {
|
||||
nonDedicatedVlanDbIds.add(nonDedicatedVlan.getId());
|
||||
}
|
||||
ip = fetchNewPublicIp(zone.getId(), null, nonDedicatedVlanDbIds, ipOwner, vlanType, null, false, assign, null,
|
||||
isSystem, null);
|
||||
}
|
||||
ip = fetchNewPublicIp(zone.getId(), null, null, ipOwner, vlanType, null, false, assign, null,
|
||||
isSystem, null);
|
||||
|
||||
if (ip == null) {
|
||||
InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException
|
||||
|
|
@ -3004,6 +3006,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L
|
|||
|
||||
Random _rand = new Random(System.currentTimeMillis());
|
||||
|
||||
@Override
|
||||
public List<? extends Nic> listVmNics(Long vmId, Long nicId) {
|
||||
List<NicVO> result = null;
|
||||
if (nicId == null) {
|
||||
|
|
@ -3014,6 +3017,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L
|
|||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String allocateGuestIP(Account ipOwner, boolean isSystem, long zoneId, Long networkId, String requestedIp)
|
||||
throws InsufficientAddressCapacityException {
|
||||
String ipaddr = null;
|
||||
|
|
|
|||
|
|
@ -315,9 +315,10 @@ public class StorageNetworkManagerImpl extends ManagerBase implements StorageNet
|
|||
List<StorageNetworkIpRangeVO> ranges = _sNwIpRangeDao.listByPodId(podId);
|
||||
for (StorageNetworkIpRangeVO r : ranges) {
|
||||
try {
|
||||
r = _sNwIpRangeDao.acquireInLockTable(r.getId());
|
||||
Long rangeId = r.getId();
|
||||
r = _sNwIpRangeDao.acquireInLockTable(rangeId);
|
||||
if (r == null) {
|
||||
String msg = "Unable to acquire lock on storage network ip range id=" + r.getId() + ", delete failed";
|
||||
String msg = "Unable to acquire lock on storage network ip range id=" + rangeId + ", delete failed";
|
||||
s_logger.warn(msg);
|
||||
throw new CloudRuntimeException(msg);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -27,24 +27,6 @@ import java.util.TreeSet;
|
|||
import javax.ejb.Local;
|
||||
import javax.inject.Inject;
|
||||
|
||||
import com.cloud.network.vpc.NetworkACLItem;
|
||||
import com.cloud.network.vpc.NetworkACLItemDao;
|
||||
import com.cloud.network.vpc.NetworkACLItemVO;
|
||||
import com.cloud.network.vpc.NetworkACLManager;
|
||||
import com.cloud.network.vpc.PrivateGateway;
|
||||
import com.cloud.network.vpc.PrivateIpAddress;
|
||||
import com.cloud.network.vpc.PrivateIpVO;
|
||||
import com.cloud.network.vpc.StaticRoute;
|
||||
import com.cloud.network.vpc.StaticRouteProfile;
|
||||
import com.cloud.network.vpc.Vpc;
|
||||
import com.cloud.network.vpc.VpcGateway;
|
||||
import com.cloud.network.vpc.VpcManager;
|
||||
import com.cloud.network.vpc.VpcVO;
|
||||
import com.cloud.network.vpc.dao.PrivateIpDao;
|
||||
import com.cloud.network.vpc.dao.StaticRouteDao;
|
||||
import com.cloud.network.vpc.dao.VpcDao;
|
||||
import com.cloud.network.vpc.dao.VpcGatewayDao;
|
||||
import com.cloud.network.vpc.dao.VpcOfferingDao;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
|
|
@ -108,6 +90,24 @@ import com.cloud.network.dao.Site2SiteCustomerGatewayVO;
|
|||
import com.cloud.network.dao.Site2SiteVpnConnectionDao;
|
||||
import com.cloud.network.dao.Site2SiteVpnGatewayDao;
|
||||
import com.cloud.network.dao.Site2SiteVpnGatewayVO;
|
||||
import com.cloud.network.vpc.NetworkACLItem;
|
||||
import com.cloud.network.vpc.NetworkACLItemDao;
|
||||
import com.cloud.network.vpc.NetworkACLItemVO;
|
||||
import com.cloud.network.vpc.NetworkACLManager;
|
||||
import com.cloud.network.vpc.PrivateGateway;
|
||||
import com.cloud.network.vpc.PrivateIpAddress;
|
||||
import com.cloud.network.vpc.PrivateIpVO;
|
||||
import com.cloud.network.vpc.StaticRoute;
|
||||
import com.cloud.network.vpc.StaticRouteProfile;
|
||||
import com.cloud.network.vpc.Vpc;
|
||||
import com.cloud.network.vpc.VpcGateway;
|
||||
import com.cloud.network.vpc.VpcManager;
|
||||
import com.cloud.network.vpc.VpcVO;
|
||||
import com.cloud.network.vpc.dao.PrivateIpDao;
|
||||
import com.cloud.network.vpc.dao.StaticRouteDao;
|
||||
import com.cloud.network.vpc.dao.VpcDao;
|
||||
import com.cloud.network.vpc.dao.VpcGatewayDao;
|
||||
import com.cloud.network.vpc.dao.VpcOfferingDao;
|
||||
import com.cloud.network.vpn.Site2SiteVpnManager;
|
||||
import com.cloud.offering.NetworkOffering;
|
||||
import com.cloud.user.Account;
|
||||
|
|
@ -127,7 +127,6 @@ import com.cloud.vm.VirtualMachineProfile;
|
|||
import com.cloud.vm.VirtualMachineProfile.Param;
|
||||
import com.cloud.vm.dao.VMInstanceDao;
|
||||
|
||||
|
||||
@Component
|
||||
@Local(value = {VpcVirtualNetworkApplianceManager.class, VpcVirtualNetworkApplianceService.class})
|
||||
public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplianceManagerImpl implements VpcVirtualNetworkApplianceManager{
|
||||
|
|
@ -339,7 +338,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian
|
|||
DomainRouterVO router = _routerDao.findById(vm.getId());
|
||||
if (router.getState() == State.Running) {
|
||||
try {
|
||||
PlugNicCommand plugNicCmd = new PlugNicCommand(nic, vm.getName());
|
||||
PlugNicCommand plugNicCmd = new PlugNicCommand(nic, vm.getName(), vm.getType());
|
||||
|
||||
Commands cmds = new Commands(OnError.Stop);
|
||||
cmds.addCommand("plugnic", plugNicCmd);
|
||||
|
|
@ -748,7 +747,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian
|
|||
// if (rule.getSourceCidrList() == null && (rule.getPurpose() == Purpose.Firewall || rule.getPurpose() == Purpose.NetworkACL)) {
|
||||
// _firewallDao.loadSourceCidrs((FirewallRuleVO)rule);
|
||||
// }
|
||||
NetworkACLTO ruleTO = new NetworkACLTO((NetworkACLItemVO)rule, guestVlan, rule.getTrafficType());
|
||||
NetworkACLTO ruleTO = new NetworkACLTO(rule, guestVlan, rule.getTrafficType());
|
||||
rulesTO.add(ruleTO);
|
||||
}
|
||||
}
|
||||
|
|
@ -828,7 +827,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian
|
|||
_routerDao.update(routerVO.getId(), routerVO);
|
||||
}
|
||||
}
|
||||
PlugNicCommand plugNicCmd = new PlugNicCommand(getNicTO(router, publicNic.getNetworkId(), publicNic.getBroadcastUri().toString()), router.getInstanceName());
|
||||
PlugNicCommand plugNicCmd = new PlugNicCommand(getNicTO(router, publicNic.getNetworkId(), publicNic.getBroadcastUri().toString()), router.getInstanceName(), router.getType());
|
||||
cmds.addCommand(plugNicCmd);
|
||||
VpcVO vpc = _vpcDao.findById(router.getVpcId());
|
||||
NetworkUsageCommand netUsageCmd = new NetworkUsageCommand(router.getPrivateIpAddress(), router.getInstanceName(), true, publicNic.getIp4Address(), vpc.getCidr());
|
||||
|
|
@ -851,7 +850,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian
|
|||
for (Pair<Nic, Network> nicNtwk : guestNics) {
|
||||
Nic guestNic = nicNtwk.first();
|
||||
//plug guest nic
|
||||
PlugNicCommand plugNicCmd = new PlugNicCommand(getNicTO(router, guestNic.getNetworkId(), null), router.getInstanceName());
|
||||
PlugNicCommand plugNicCmd = new PlugNicCommand(getNicTO(router, guestNic.getNetworkId(), null), router.getInstanceName(), router.getType());
|
||||
cmds.addCommand(plugNicCmd);
|
||||
|
||||
if (!_networkModel.isPrivateGateway(guestNic)) {
|
||||
|
|
|
|||
|
|
@ -85,6 +85,10 @@ import com.cloud.dc.dao.ClusterVSMMapDao;
|
|||
import com.cloud.dc.dao.DataCenterDao;
|
||||
import com.cloud.dc.dao.DataCenterIpAddressDao;
|
||||
import com.cloud.dc.dao.HostPodDao;
|
||||
import com.cloud.deploy.PlannerHostReservationVO;
|
||||
import com.cloud.deploy.dao.PlannerHostReservationDao;
|
||||
import com.cloud.event.ActionEvent;
|
||||
import com.cloud.event.EventTypes;
|
||||
import com.cloud.exception.AgentUnavailableException;
|
||||
import com.cloud.exception.DiscoveryException;
|
||||
import com.cloud.exception.InvalidParameterValueException;
|
||||
|
|
@ -212,6 +216,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
|
|||
protected HighAvailabilityManager _haMgr;
|
||||
@Inject
|
||||
protected StorageService _storageSvr;
|
||||
@Inject
|
||||
PlannerHostReservationDao _plannerHostReserveDao;
|
||||
|
||||
protected List<? extends Discoverer> _discoverers;
|
||||
public List<? extends Discoverer> getDiscoverers() {
|
||||
|
|
@ -2851,4 +2857,41 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
|
|||
ResourceState.Enabled);
|
||||
return sc.list();
|
||||
}
|
||||
|
||||
@Override
|
||||
@DB
|
||||
@ActionEvent(eventType = EventTypes.EVENT_HOST_RESERVATION_RELEASE, eventDescription = "releasing host reservation", async = true)
|
||||
public boolean releaseHostReservation(Long hostId) {
|
||||
Transaction txn = Transaction.currentTxn();
|
||||
try {
|
||||
txn.start();
|
||||
PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(hostId);
|
||||
if (reservationEntry != null) {
|
||||
long id = reservationEntry.getId();
|
||||
PlannerHostReservationVO hostReservation = _plannerHostReserveDao.lockRow(id, true);
|
||||
if (hostReservation == null) {
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Host reservation for host: " + hostId + " does not even exist. Release reservartion call is ignored.");
|
||||
}
|
||||
txn.rollback();
|
||||
return false;
|
||||
}
|
||||
hostReservation.setResourceUsage(null);
|
||||
_plannerHostReserveDao.persist(hostReservation);
|
||||
txn.commit();
|
||||
return true;
|
||||
}
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Host reservation for host: " + hostId
|
||||
+ " does not even exist. Release reservartion call is ignored.");
|
||||
}
|
||||
return false;
|
||||
} catch (CloudRuntimeException e) {
|
||||
throw e;
|
||||
} catch (Throwable t) {
|
||||
s_logger.error("Unable to release host reservation for host: " + hostId, t);
|
||||
txn.rollback();
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,6 +19,11 @@ package com.cloud.server;
|
|||
import java.util.Date;
|
||||
import java.util.List;
|
||||
|
||||
import com.cloud.exception.ConcurrentOperationException;
|
||||
import com.cloud.exception.ManagementServerException;
|
||||
import com.cloud.exception.ResourceUnavailableException;
|
||||
import com.cloud.exception.VirtualMachineMigrationException;
|
||||
import org.apache.cloudstack.api.command.admin.systemvm.ScaleSystemVMCmd;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
|
||||
import com.cloud.event.EventVO;
|
||||
|
|
@ -100,4 +105,5 @@ public interface ManagementServer extends ManagementService, PluggableService {
|
|||
void resetEncryptionKeyIV();
|
||||
|
||||
public void enableAdminUser(String password);
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -43,6 +43,8 @@ import javax.crypto.spec.SecretKeySpec;
|
|||
import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import com.cloud.exception.*;
|
||||
import com.cloud.vm.*;
|
||||
import org.apache.cloudstack.acl.ControlledEntity;
|
||||
import org.apache.cloudstack.acl.SecurityChecker.AccessType;
|
||||
import org.apache.cloudstack.affinity.AffinityGroupProcessor;
|
||||
|
|
@ -78,6 +80,7 @@ import org.apache.cloudstack.api.command.admin.host.FindHostsForMigrationCmd;
|
|||
import org.apache.cloudstack.api.command.admin.host.ListHostsCmd;
|
||||
import org.apache.cloudstack.api.command.admin.host.PrepareForMaintenanceCmd;
|
||||
import org.apache.cloudstack.api.command.admin.host.ReconnectHostCmd;
|
||||
import org.apache.cloudstack.api.command.admin.host.ReleaseHostReservationCmd;
|
||||
import org.apache.cloudstack.api.command.admin.host.UpdateHostCmd;
|
||||
import org.apache.cloudstack.api.command.admin.host.UpdateHostPasswordCmd;
|
||||
import org.apache.cloudstack.api.command.admin.internallb.ConfigureInternalLoadBalancerElementCmd;
|
||||
|
|
@ -150,13 +153,7 @@ import org.apache.cloudstack.api.command.admin.storage.PreparePrimaryStorageForM
|
|||
import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd;
|
||||
import org.apache.cloudstack.api.command.admin.swift.AddSwiftCmd;
|
||||
import org.apache.cloudstack.api.command.admin.swift.ListSwiftsCmd;
|
||||
import org.apache.cloudstack.api.command.admin.systemvm.DestroySystemVmCmd;
|
||||
import org.apache.cloudstack.api.command.admin.systemvm.ListSystemVMsCmd;
|
||||
import org.apache.cloudstack.api.command.admin.systemvm.MigrateSystemVMCmd;
|
||||
import org.apache.cloudstack.api.command.admin.systemvm.RebootSystemVmCmd;
|
||||
import org.apache.cloudstack.api.command.admin.systemvm.StartSystemVMCmd;
|
||||
import org.apache.cloudstack.api.command.admin.systemvm.StopSystemVmCmd;
|
||||
import org.apache.cloudstack.api.command.admin.systemvm.UpgradeSystemVMCmd;
|
||||
import org.apache.cloudstack.api.command.admin.systemvm.*;
|
||||
import org.apache.cloudstack.api.command.admin.template.PrepareTemplateCmd;
|
||||
import org.apache.cloudstack.api.command.admin.usage.AddTrafficMonitorCmd;
|
||||
import org.apache.cloudstack.api.command.admin.usage.AddTrafficTypeCmd;
|
||||
|
|
@ -462,6 +459,7 @@ import com.cloud.dc.dao.HostPodDao;
|
|||
import com.cloud.dc.dao.PodVlanMapDao;
|
||||
import com.cloud.dc.dao.VlanDao;
|
||||
import com.cloud.deploy.DataCenterDeployment;
|
||||
import com.cloud.deploy.DeploymentPlanner;
|
||||
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
|
||||
import com.cloud.domain.DomainVO;
|
||||
import com.cloud.domain.dao.DomainDao;
|
||||
|
|
@ -470,12 +468,6 @@ import com.cloud.event.ActionEventUtils;
|
|||
import com.cloud.event.EventTypes;
|
||||
import com.cloud.event.EventVO;
|
||||
import com.cloud.event.dao.EventDao;
|
||||
import com.cloud.exception.ConcurrentOperationException;
|
||||
import com.cloud.exception.InvalidParameterValueException;
|
||||
import com.cloud.exception.OperationTimedoutException;
|
||||
import com.cloud.exception.PermissionDeniedException;
|
||||
import com.cloud.exception.ResourceUnavailableException;
|
||||
import com.cloud.exception.StorageUnavailableException;
|
||||
import com.cloud.ha.HighAvailabilityManager;
|
||||
import com.cloud.host.DetailVO;
|
||||
import com.cloud.host.Host;
|
||||
|
|
@ -569,17 +561,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
|
|||
import com.cloud.utils.net.MacAddress;
|
||||
import com.cloud.utils.net.NetUtils;
|
||||
import com.cloud.utils.ssh.SSHKeysHelper;
|
||||
import com.cloud.vm.ConsoleProxyVO;
|
||||
import com.cloud.vm.DiskProfile;
|
||||
import com.cloud.vm.InstanceGroupVO;
|
||||
import com.cloud.vm.SecondaryStorageVmVO;
|
||||
import com.cloud.vm.UserVmVO;
|
||||
import com.cloud.vm.VMInstanceVO;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachine.State;
|
||||
import com.cloud.vm.VirtualMachineManager;
|
||||
import com.cloud.vm.VirtualMachineProfile;
|
||||
import com.cloud.vm.VirtualMachineProfileImpl;
|
||||
import com.cloud.vm.dao.ConsoleProxyDao;
|
||||
import com.cloud.vm.dao.DomainRouterDao;
|
||||
import com.cloud.vm.dao.InstanceGroupDao;
|
||||
|
|
@ -589,6 +571,7 @@ import com.cloud.vm.dao.VMInstanceDao;
|
|||
|
||||
import edu.emory.mathcs.backport.java.util.Arrays;
|
||||
import edu.emory.mathcs.backport.java.util.Collections;
|
||||
import org.apache.cloudstack.api.command.admin.config.ListDeploymentPlannersCmd;
|
||||
|
||||
|
||||
public class ManagementServerImpl extends ManagerBase implements ManagementServer {
|
||||
|
|
@ -714,6 +697,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
|||
|
||||
@Inject
|
||||
ConfigurationServer _configServer;
|
||||
@Inject
|
||||
UserVmManager _userVmMgr;
|
||||
|
||||
private final ScheduledExecutorService _eventExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("EventChecker"));
|
||||
private final ScheduledExecutorService _alertExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("AlertChecker"));
|
||||
|
|
@ -726,11 +711,21 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
|||
private List<UserAuthenticator> _userAuthenticators;
|
||||
private List<UserAuthenticator> _userPasswordEncoders;
|
||||
|
||||
protected List<DeploymentPlanner> _planners;
|
||||
|
||||
public List<DeploymentPlanner> getPlanners() {
|
||||
return _planners;
|
||||
}
|
||||
|
||||
public void setPlanners(List<DeploymentPlanner> _planners) {
|
||||
this._planners = _planners;
|
||||
}
|
||||
|
||||
@Inject ClusterManager _clusterMgr;
|
||||
private String _hashKey = null;
|
||||
private String _encryptionKey = null;
|
||||
private String _encryptionIV = null;
|
||||
|
||||
|
||||
@Inject
|
||||
protected AffinityGroupVMMapDao _affinityGroupVMMapDao;
|
||||
|
||||
|
|
@ -976,29 +971,29 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
|||
String zoneType = cmd.getZoneType();
|
||||
String keyword = cmd.getKeyword();
|
||||
zoneId = _accountMgr.checkAccessAndSpecifyAuthority(UserContext.current().getCaller(), zoneId);
|
||||
|
||||
|
||||
|
||||
|
||||
Filter searchFilter = new Filter(ClusterVO.class, "id", true, cmd.getStartIndex(), cmd.getPageSizeVal());
|
||||
|
||||
SearchBuilder<ClusterVO> sb = _clusterDao.createSearchBuilder();
|
||||
sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ);
|
||||
sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE);
|
||||
sb.and("podId", sb.entity().getPodId(), SearchCriteria.Op.EQ);
|
||||
sb.and("dataCenterId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ);
|
||||
|
||||
SearchBuilder<ClusterVO> sb = _clusterDao.createSearchBuilder();
|
||||
sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ);
|
||||
sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE);
|
||||
sb.and("podId", sb.entity().getPodId(), SearchCriteria.Op.EQ);
|
||||
sb.and("dataCenterId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ);
|
||||
sb.and("hypervisorType", sb.entity().getHypervisorType(), SearchCriteria.Op.EQ);
|
||||
sb.and("clusterType", sb.entity().getClusterType(), SearchCriteria.Op.EQ);
|
||||
sb.and("allocationState", sb.entity().getAllocationState(), SearchCriteria.Op.EQ);
|
||||
|
||||
|
||||
if(zoneType != null) {
|
||||
SearchBuilder<DataCenterVO> zoneSb = _dcDao.createSearchBuilder();
|
||||
zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ);
|
||||
zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ);
|
||||
sb.join("zoneSb", zoneSb, sb.entity().getDataCenterId(), zoneSb.entity().getId(), JoinBuilder.JoinType.INNER);
|
||||
}
|
||||
|
||||
|
||||
SearchCriteria<ClusterVO> sc = sb.create();
|
||||
|
||||
|
||||
SearchCriteria<ClusterVO> sc = sb.create();
|
||||
if (id != null) {
|
||||
sc.setParameters("id", id);
|
||||
sc.setParameters("id", id);
|
||||
}
|
||||
|
||||
if (name != null) {
|
||||
|
|
@ -1026,9 +1021,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
|||
}
|
||||
|
||||
if(zoneType != null) {
|
||||
sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType);
|
||||
sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType);
|
||||
}
|
||||
|
||||
|
||||
if (keyword != null) {
|
||||
SearchCriteria<ClusterVO> ssc = _clusterDao.createSearchCriteria();
|
||||
ssc.addOr("name", SearchCriteria.Op.LIKE, "%" + keyword + "%");
|
||||
|
|
@ -1073,17 +1068,16 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
|||
|
||||
VMInstanceVO vm = _vmInstanceDao.findById(vmId);
|
||||
if (vm == null) {
|
||||
InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find the VM with specified id");
|
||||
ex.addProxyObject(vm, vmId, "vmId");
|
||||
InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find the VM with given id");
|
||||
throw ex;
|
||||
}
|
||||
|
||||
if (vm.getState() != State.Running) {
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("VM is not Running, unable to migrate the vm" + vm);
|
||||
s_logger.debug("VM is not running, cannot migrate the vm" + vm);
|
||||
}
|
||||
InvalidParameterValueException ex = new InvalidParameterValueException("VM is not Running, unable to" +
|
||||
" migrate the vm with specified id");
|
||||
InvalidParameterValueException ex = new InvalidParameterValueException("VM is not Running, cannot " +
|
||||
"migrate the vm with specified id");
|
||||
ex.addProxyObject(vm, vmId, "vmId");
|
||||
throw ex;
|
||||
}
|
||||
|
|
@ -1441,26 +1435,26 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
|||
public Pair<List<? extends Pod>, Integer> searchForPods(ListPodsByCmd cmd) {
|
||||
String podName = cmd.getPodName();
|
||||
Long id = cmd.getId();
|
||||
Long zoneId = cmd.getZoneId();
|
||||
Long zoneId = cmd.getZoneId();
|
||||
Object keyword = cmd.getKeyword();
|
||||
Object allocationState = cmd.getAllocationState();
|
||||
String zoneType = cmd.getZoneType();
|
||||
zoneId = _accountMgr.checkAccessAndSpecifyAuthority(UserContext.current().getCaller(), zoneId);
|
||||
|
||||
|
||||
|
||||
Filter searchFilter = new Filter(HostPodVO.class, "dataCenterId", true, cmd.getStartIndex(), cmd.getPageSizeVal());
|
||||
SearchBuilder<HostPodVO> sb = _hostPodDao.createSearchBuilder();
|
||||
SearchBuilder<HostPodVO> sb = _hostPodDao.createSearchBuilder();
|
||||
sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ);
|
||||
sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE);
|
||||
sb.and("dataCenterId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ);
|
||||
sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE);
|
||||
sb.and("dataCenterId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ);
|
||||
sb.and("allocationState", sb.entity().getAllocationState(), SearchCriteria.Op.EQ);
|
||||
|
||||
|
||||
if(zoneType != null) {
|
||||
SearchBuilder<DataCenterVO> zoneSb = _dcDao.createSearchBuilder();
|
||||
zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ);
|
||||
zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ);
|
||||
sb.join("zoneSb", zoneSb, sb.entity().getDataCenterId(), zoneSb.entity().getId(), JoinBuilder.JoinType.INNER);
|
||||
}
|
||||
|
||||
|
||||
SearchCriteria<HostPodVO> sc = sb.create();
|
||||
if (keyword != null) {
|
||||
SearchCriteria<HostPodVO> ssc = _hostPodDao.createSearchCriteria();
|
||||
|
|
@ -1473,23 +1467,23 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
|||
if (id != null) {
|
||||
sc.setParameters("id", id);
|
||||
}
|
||||
|
||||
|
||||
if (podName != null) {
|
||||
sc.setParameters("name", "%" + podName + "%");
|
||||
}
|
||||
|
||||
|
||||
if (zoneId != null) {
|
||||
sc.setParameters("dataCenterId", zoneId);
|
||||
}
|
||||
|
||||
|
||||
if (allocationState != null) {
|
||||
sc.setParameters("allocationState", allocationState);
|
||||
}
|
||||
|
||||
if(zoneType != null) {
|
||||
sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType);
|
||||
}
|
||||
|
||||
|
||||
if(zoneType != null) {
|
||||
sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType);
|
||||
}
|
||||
|
||||
Pair<List<HostPodVO>, Integer> result = _hostPodDao.searchAndCount(sc, searchFilter);
|
||||
return new Pair<List<? extends Pod>, Integer>(result.first(), result.second());
|
||||
}
|
||||
|
|
@ -2903,7 +2897,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
|||
cmdList.add(ListAffinityGroupsCmd.class);
|
||||
cmdList.add(UpdateVMAffinityGroupCmd.class);
|
||||
cmdList.add(ListAffinityGroupTypesCmd.class);
|
||||
|
||||
cmdList.add(ListDeploymentPlannersCmd.class);
|
||||
cmdList.add(ReleaseHostReservationCmd.class);
|
||||
cmdList.add(ScaleSystemVMCmd.class);
|
||||
cmdList.add(AddResourceDetailCmd.class);
|
||||
cmdList.add(RemoveResourceDetailCmd.class);
|
||||
cmdList.add(ListResourceDetailsCmd.class);
|
||||
|
|
@ -3105,10 +3101,10 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
|||
|
||||
if(zoneType != null) {
|
||||
SearchBuilder<DataCenterVO> zoneSb = _dcDao.createSearchBuilder();
|
||||
zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ);
|
||||
zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ);
|
||||
sb.join("zoneSb", zoneSb, sb.entity().getDataCenterId(), zoneSb.entity().getId(), JoinBuilder.JoinType.INNER);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
SearchCriteria<VMInstanceVO> sc = sb.create();
|
||||
|
||||
if (keyword != null) {
|
||||
|
|
@ -3150,9 +3146,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
|||
}
|
||||
|
||||
if(zoneType != null) {
|
||||
sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType);
|
||||
sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType);
|
||||
}
|
||||
|
||||
|
||||
Pair<List<VMInstanceVO>, Integer> result = _vmInstanceDao.searchAndCount(sc, searchFilter);
|
||||
return new Pair<List<? extends VirtualMachine>, Integer>(result.first(), result.second());
|
||||
}
|
||||
|
|
@ -3677,7 +3673,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
|||
// although we may have race conditioning here, database transaction serialization should
|
||||
// give us the same key
|
||||
if (_hashKey == null) {
|
||||
_hashKey = _configDao.getValueAndInitIfNotExist(Config.HashKey.key(), Config.HashKey.getCategory(),
|
||||
_hashKey = _configDao.getValueAndInitIfNotExist(Config.HashKey.key(), Config.HashKey.getCategory(),
|
||||
getBase64EncodedRandomKey(128));
|
||||
}
|
||||
return _hashKey;
|
||||
|
|
@ -3686,41 +3682,41 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
|||
@Override
|
||||
public String getEncryptionKey() {
|
||||
if (_encryptionKey == null) {
|
||||
_encryptionKey = _configDao.getValueAndInitIfNotExist(Config.EncryptionKey.key(),
|
||||
Config.EncryptionKey.getCategory(),
|
||||
_encryptionKey = _configDao.getValueAndInitIfNotExist(Config.EncryptionKey.key(),
|
||||
Config.EncryptionKey.getCategory(),
|
||||
getBase64EncodedRandomKey(128));
|
||||
}
|
||||
return _encryptionKey;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public String getEncryptionIV() {
|
||||
if (_encryptionIV == null) {
|
||||
_encryptionIV = _configDao.getValueAndInitIfNotExist(Config.EncryptionIV.key(),
|
||||
Config.EncryptionIV.getCategory(),
|
||||
_encryptionIV = _configDao.getValueAndInitIfNotExist(Config.EncryptionIV.key(),
|
||||
Config.EncryptionIV.getCategory(),
|
||||
getBase64EncodedRandomKey(128));
|
||||
}
|
||||
return _encryptionIV;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
@DB
|
||||
public void resetEncryptionKeyIV() {
|
||||
|
||||
|
||||
SearchBuilder<ConfigurationVO> sb = _configDao.createSearchBuilder();
|
||||
sb.and("name1", sb.entity().getName(), SearchCriteria.Op.EQ);
|
||||
sb.or("name2", sb.entity().getName(), SearchCriteria.Op.EQ);
|
||||
sb.done();
|
||||
|
||||
|
||||
SearchCriteria<ConfigurationVO> sc = sb.create();
|
||||
sc.setParameters("name1", Config.EncryptionKey.key());
|
||||
sc.setParameters("name2", Config.EncryptionIV.key());
|
||||
|
||||
|
||||
_configDao.expunge(sc);
|
||||
_encryptionKey = null;
|
||||
_encryptionIV = null;
|
||||
}
|
||||
|
||||
|
||||
private static String getBase64EncodedRandomKey(int nBits) {
|
||||
SecureRandom random;
|
||||
try {
|
||||
|
|
@ -4007,10 +4003,28 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public VirtualMachine upgradeSystemVM(ScaleSystemVMCmd cmd) throws ResourceUnavailableException, ManagementServerException, VirtualMachineMigrationException, ConcurrentOperationException {
|
||||
|
||||
boolean result = _userVmMgr.upgradeVirtualMachine(cmd.getId(), cmd.getServiceOfferingId());
|
||||
if(result){
|
||||
VirtualMachine vm = _vmInstanceDao.findById(cmd.getId());
|
||||
return vm;
|
||||
}else{
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public VirtualMachine upgradeSystemVM(UpgradeSystemVMCmd cmd) {
|
||||
Long systemVmId = cmd.getId();
|
||||
Long serviceOfferingId = cmd.getServiceOfferingId();
|
||||
return upgradeStoppedSystemVm(systemVmId, serviceOfferingId);
|
||||
|
||||
}
|
||||
|
||||
private VirtualMachine upgradeStoppedSystemVm(Long systemVmId, Long serviceOfferingId){
|
||||
Account caller = UserContext.current().getCaller();
|
||||
|
||||
VMInstanceVO systemVm = _vmInstanceDao.findByIdTypes(systemVmId, VirtualMachine.Type.ConsoleProxy, VirtualMachine.Type.SecondaryStorageVm);
|
||||
|
|
@ -4056,4 +4070,15 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
|||
}
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<String> listDeploymentPlanners() {
|
||||
List<String> plannersAvailable = new ArrayList<String>();
|
||||
for (DeploymentPlanner planner : _planners) {
|
||||
plannersAvailable.add(planner.getName());
|
||||
}
|
||||
|
||||
return plannersAvailable;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -22,9 +22,7 @@ import java.util.Map;
|
|||
|
||||
import com.cloud.agent.api.VmStatsEntry;
|
||||
import com.cloud.api.query.vo.UserVmJoinVO;
|
||||
import com.cloud.exception.ConcurrentOperationException;
|
||||
import com.cloud.exception.InsufficientCapacityException;
|
||||
import com.cloud.exception.ResourceUnavailableException;
|
||||
import com.cloud.exception.*;
|
||||
import com.cloud.projects.Project.ListProjectResourcesCriteria;
|
||||
import com.cloud.server.Criteria;
|
||||
import com.cloud.user.Account;
|
||||
|
|
@ -94,4 +92,6 @@ public interface UserVmManager extends VirtualMachineGuru<UserVmVO>, UserVmServi
|
|||
|
||||
Pair<UserVmVO, Map<VirtualMachineProfile.Param, Object>> startVirtualMachine(long vmId, Long hostId, Map<VirtualMachineProfile.Param, Object> additionalParams) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException;
|
||||
|
||||
boolean upgradeVirtualMachine(Long id, Long serviceOfferingId) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException;
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -98,7 +98,6 @@ import com.cloud.dc.dao.DataCenterDao;
|
|||
import com.cloud.dc.dao.HostPodDao;
|
||||
import com.cloud.deploy.DataCenterDeployment;
|
||||
import com.cloud.deploy.DeployDestination;
|
||||
import com.cloud.deploy.DeployPlannerSelector;
|
||||
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
|
||||
import com.cloud.domain.DomainVO;
|
||||
import com.cloud.domain.dao.DomainDao;
|
||||
|
|
@ -402,9 +401,6 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use
|
|||
@Inject
|
||||
AffinityGroupDao _affinityGroupDao;
|
||||
|
||||
@Inject
|
||||
List<DeployPlannerSelector> plannerSelectors;
|
||||
|
||||
protected ScheduledExecutorService _executor = null;
|
||||
protected int _expungeInterval;
|
||||
protected int _expungeDelay;
|
||||
|
|
@ -1080,11 +1076,22 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use
|
|||
|
||||
@Override
|
||||
@ActionEvent(eventType = EventTypes.EVENT_VM_SCALE, eventDescription = "scaling Vm")
|
||||
public boolean
|
||||
upgradeVirtualMachine(ScaleVMCmd cmd) throws InvalidParameterValueException, ResourceAllocationException {
|
||||
public UserVm
|
||||
upgradeVirtualMachine(ScaleVMCmd cmd) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException{
|
||||
|
||||
Long vmId = cmd.getId();
|
||||
Long newServiceOfferingId = cmd.getServiceOfferingId();
|
||||
boolean result = upgradeVirtualMachine(vmId, newServiceOfferingId);
|
||||
if(result){
|
||||
return _vmDao.findById(vmId);
|
||||
}else{
|
||||
return null;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean upgradeVirtualMachine(Long vmId, Long newServiceOfferingId) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException{
|
||||
Account caller = UserContext.current().getCaller();
|
||||
|
||||
// Verify input parameters
|
||||
|
|
@ -1151,9 +1158,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use
|
|||
}
|
||||
|
||||
return success;
|
||||
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public HashMap<Long, VmStatsEntry> getVirtualMachineStatistics(long hostId,
|
||||
String hostName, List<Long> vmIds) throws CloudRuntimeException {
|
||||
|
|
@ -2836,7 +2843,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use
|
|||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public boolean finalizeDeployment(Commands cmds,
|
||||
VirtualMachineProfile<UserVmVO> profile, DeployDestination dest,
|
||||
|
|
@ -2898,6 +2905,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use
|
|||
originalIp = nic.getIp4Address();
|
||||
guestNic = nic;
|
||||
guestNetwork = network;
|
||||
// In vmware, we will be effecting pvlan settings in portgroups in StartCommand.
|
||||
if (profile.getHypervisorType() != HypervisorType.VMware) {
|
||||
if (nic.getBroadcastUri().getScheme().equals("pvlan")) {
|
||||
if (!setupVmForPvlan(true, hostId, nic)) {
|
||||
return false;
|
||||
|
|
@ -2905,6 +2914,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use
|
|||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
boolean ipChanged = false;
|
||||
if (originalIp != null && !originalIp.equalsIgnoreCase(returnedIp)) {
|
||||
if (returnedIp != null && guestNic != null) {
|
||||
|
|
@ -3033,7 +3043,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use
|
|||
+ " stop due to exception ", ex);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
VMInstanceVO vm = profile.getVirtualMachine();
|
||||
List<NicVO> nics = _nicDao.listByVmId(vm.getId());
|
||||
for (NicVO nic : nics) {
|
||||
|
|
@ -3171,15 +3181,15 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use
|
|||
|
||||
VirtualMachineEntity vmEntity = _orchSrvc.getVirtualMachine(vm.getUuid());
|
||||
|
||||
String plannerName = null;
|
||||
for (DeployPlannerSelector dps : plannerSelectors) {
|
||||
plannerName = dps.selectPlanner(vm);
|
||||
if (plannerName != null) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
// Get serviceOffering for Virtual Machine
|
||||
ServiceOfferingVO offering = _serviceOfferingDao.findByIdIncludingRemoved(vm.getServiceOfferingId());
|
||||
String plannerName = offering.getDeploymentPlanner();
|
||||
if (plannerName == null) {
|
||||
throw new CloudRuntimeException(String.format("cannot find DeployPlannerSelector for vm[uuid:%s, hypervisorType:%s]", vm.getUuid(), vm.getHypervisorType()));
|
||||
if (vm.getHypervisorType() == HypervisorType.BareMetal) {
|
||||
plannerName = "BareMetalPlanner";
|
||||
} else {
|
||||
plannerName = _configDao.getValue(Config.VmDeploymentPlanner.key());
|
||||
}
|
||||
}
|
||||
|
||||
String reservationId = vmEntity.reserve(plannerName, plan, new ExcludeList(), new Long(callerUser.getId()).toString());
|
||||
|
|
@ -3823,7 +3833,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use
|
|||
+ cmd.getAccountName() + " is disabled.");
|
||||
}
|
||||
|
||||
//check caller has access to both the old and new account
|
||||
//check caller has access to both the old and new account
|
||||
_accountMgr.checkAccess(caller, null, true, oldAccount);
|
||||
_accountMgr.checkAccess(caller, null, true, newAccount);
|
||||
|
||||
|
|
@ -4336,7 +4346,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use
|
|||
UserVmVO vmVO = _vmDao.findById(vm.getId());
|
||||
if (vmVO.getState() == State.Running) {
|
||||
try {
|
||||
PlugNicCommand plugNicCmd = new PlugNicCommand(nic,vm.getName());
|
||||
PlugNicCommand plugNicCmd = new PlugNicCommand(nic,vm.getName(), vm.getType());
|
||||
Commands cmds = new Commands(OnError.Stop);
|
||||
cmds.addCommand("plugnic",plugNicCmd);
|
||||
_agentMgr.send(dest.getHost().getId(),cmds);
|
||||
|
|
|
|||
|
|
@ -155,7 +155,7 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR
|
|||
long gslbRuleId = assignToGslbCmd.getGlobalLoadBalancerRuleId();
|
||||
GlobalLoadBalancerRuleVO gslbRule = _gslbRuleDao.findById(gslbRuleId);
|
||||
if (gslbRule == null) {
|
||||
throw new InvalidParameterValueException("Invalid global load balancer rule id: " + gslbRule.getUuid());
|
||||
throw new InvalidParameterValueException("Invalid global load balancer rule id: " + gslbRuleId);
|
||||
}
|
||||
|
||||
_accountMgr.checkAccess(caller, SecurityChecker.AccessType.ModifyEntry, true, gslbRule);
|
||||
|
|
@ -282,7 +282,7 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR
|
|||
long gslbRuleId = removeFromGslbCmd.getGlobalLoadBalancerRuleId();
|
||||
GlobalLoadBalancerRuleVO gslbRule = _gslbRuleDao.findById(gslbRuleId);
|
||||
if (gslbRule == null) {
|
||||
throw new InvalidParameterValueException("Invalid global load balancer rule id: " + gslbRule.getUuid());
|
||||
throw new InvalidParameterValueException("Invalid global load balancer rule id: " + gslbRuleId);
|
||||
}
|
||||
|
||||
_accountMgr.checkAccess(caller, SecurityChecker.AccessType.ModifyEntry, true, gslbRule);
|
||||
|
|
|
|||
|
|
@ -608,4 +608,10 @@ public class MockResourceManagerImpl extends ManagerBase implements ResourceMana
|
|||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean releaseHostReservation(Long hostId) {
|
||||
// TODO Auto-generated method stub
|
||||
return false;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,359 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.vm;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import com.cloud.service.ServiceOfferingVO;
|
||||
import com.cloud.storage.StorageManager;
|
||||
import com.cloud.storage.dao.DiskOfferingDao;
|
||||
import com.cloud.storage.dao.GuestOSCategoryDao;
|
||||
import com.cloud.storage.dao.GuestOSDao;
|
||||
import com.cloud.storage.dao.StoragePoolHostDao;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.capacity.CapacityManager;
|
||||
import com.cloud.capacity.dao.CapacityDao;
|
||||
import com.cloud.configuration.dao.ConfigurationDao;
|
||||
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
|
||||
import com.cloud.agent.AgentManager;
|
||||
import com.cloud.dc.ClusterDetailsDao;
|
||||
import com.cloud.dc.ClusterVO;
|
||||
import com.cloud.dc.DataCenterVO;
|
||||
import com.cloud.dc.dao.ClusterDao;
|
||||
import com.cloud.dc.dao.DataCenterDao;
|
||||
import com.cloud.dc.dao.HostPodDao;
|
||||
import com.cloud.deploy.DataCenterDeployment;
|
||||
import com.cloud.deploy.DeployDestination;
|
||||
import com.cloud.deploy.DeploymentClusterPlanner;
|
||||
import com.cloud.deploy.DeploymentPlanner;
|
||||
import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage;
|
||||
import com.cloud.deploy.DeploymentPlanningManagerImpl;
|
||||
import com.cloud.deploy.FirstFitPlanner;
|
||||
import com.cloud.deploy.PlannerHostReservationVO;
|
||||
import com.cloud.deploy.dao.PlannerHostReservationDao;
|
||||
import org.apache.cloudstack.affinity.AffinityGroupProcessor;
|
||||
import org.apache.cloudstack.affinity.dao.AffinityGroupDao;
|
||||
import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||
import org.apache.cloudstack.framework.messagebus.MessageBus;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.cloudstack.test.utils.SpringUtils;
|
||||
import org.junit.Before;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.Mockito;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.ComponentScan;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.context.annotation.FilterType;
|
||||
import org.springframework.context.annotation.ComponentScan.Filter;
|
||||
import org.springframework.core.type.classreading.MetadataReader;
|
||||
import org.springframework.core.type.classreading.MetadataReaderFactory;
|
||||
import org.springframework.core.type.filter.TypeFilter;
|
||||
import org.springframework.test.context.ContextConfiguration;
|
||||
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
|
||||
import org.springframework.test.context.support.AnnotationConfigContextLoader;
|
||||
|
||||
import com.cloud.exception.AffinityConflictException;
|
||||
import com.cloud.exception.InsufficientServerCapacityException;
|
||||
import com.cloud.host.dao.HostDao;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.user.AccountManager;
|
||||
import com.cloud.utils.component.ComponentContext;
|
||||
import com.cloud.vm.dao.UserVmDao;
|
||||
import com.cloud.vm.dao.VMInstanceDao;
|
||||
|
||||
@RunWith(SpringJUnit4ClassRunner.class)
|
||||
@ContextConfiguration(loader = AnnotationConfigContextLoader.class)
|
||||
public class DeploymentPlanningManagerImplTest {
|
||||
|
||||
@Inject
|
||||
DeploymentPlanningManagerImpl _dpm;
|
||||
|
||||
@Inject
|
||||
PlannerHostReservationDao _plannerHostReserveDao;
|
||||
|
||||
@Inject VirtualMachineProfileImpl vmProfile;
|
||||
|
||||
@Inject
|
||||
AffinityGroupVMMapDao _affinityGroupVMMapDao;
|
||||
|
||||
@Inject
|
||||
ExcludeList avoids;
|
||||
|
||||
@Inject
|
||||
DataCenterVO dc;
|
||||
|
||||
@Inject
|
||||
DataCenterDao _dcDao;
|
||||
|
||||
@Inject
|
||||
FirstFitPlanner _planner;
|
||||
|
||||
@Inject
|
||||
ClusterDao _clusterDao;
|
||||
|
||||
private static long domainId = 5L;
|
||||
|
||||
private static long dataCenterId = 1L;
|
||||
|
||||
|
||||
@BeforeClass
|
||||
public static void setUp() throws ConfigurationException {
|
||||
}
|
||||
|
||||
@Before
|
||||
public void testSetUp() {
|
||||
ComponentContext.initComponentsLifeCycle();
|
||||
|
||||
PlannerHostReservationVO reservationVO = new PlannerHostReservationVO(200L, 1L, 2L, 3L, PlannerResourceUsage.Shared);
|
||||
Mockito.when(_plannerHostReserveDao.persist(Mockito.any(PlannerHostReservationVO.class))).thenReturn(reservationVO);
|
||||
Mockito.when(_plannerHostReserveDao.findById(Mockito.anyLong())).thenReturn(reservationVO);
|
||||
Mockito.when(_affinityGroupVMMapDao.countAffinityGroupsForVm(Mockito.anyLong())).thenReturn(0L);
|
||||
|
||||
VMInstanceVO vm = new VMInstanceVO();
|
||||
Mockito.when(vmProfile.getVirtualMachine()).thenReturn(vm);
|
||||
|
||||
Mockito.when(_dcDao.findById(Mockito.anyLong())).thenReturn(dc);
|
||||
Mockito.when(dc.getId()).thenReturn(dataCenterId);
|
||||
|
||||
ClusterVO clusterVO = new ClusterVO();
|
||||
clusterVO.setHypervisorType(HypervisorType.XenServer.toString());
|
||||
Mockito.when(_clusterDao.findById(Mockito.anyLong())).thenReturn(clusterVO);
|
||||
|
||||
Mockito.when(_planner.getName()).thenReturn("FirstFitPlanner");
|
||||
List<DeploymentPlanner> planners = new ArrayList<DeploymentPlanner>();
|
||||
planners.add(_planner);
|
||||
_dpm.setPlanners(planners);
|
||||
|
||||
}
|
||||
|
||||
@Test
|
||||
public void dataCenterAvoidTest() throws InsufficientServerCapacityException, AffinityConflictException {
|
||||
ServiceOfferingVO svcOffering = new ServiceOfferingVO("testOffering", 1, 512, 500, 1, 1, false, false, false,
|
||||
"test dpm", false, false, null, false, VirtualMachine.Type.User, domainId, null, "FirstFitPlanner");
|
||||
Mockito.when(vmProfile.getServiceOffering()).thenReturn(svcOffering);
|
||||
|
||||
DataCenterDeployment plan = new DataCenterDeployment(dataCenterId);
|
||||
|
||||
Mockito.when(avoids.shouldAvoid((DataCenterVO) Mockito.anyObject())).thenReturn(true);
|
||||
DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids);
|
||||
assertNull("DataCenter is in avoid set, destination should be null! ", dest);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void plannerCannotHandleTest() throws InsufficientServerCapacityException, AffinityConflictException {
|
||||
ServiceOfferingVO svcOffering = new ServiceOfferingVO("testOffering", 1, 512, 500, 1, 1, false, false, false,
|
||||
"test dpm", false, false, null, false, VirtualMachine.Type.User, domainId, null,
|
||||
"UserDispersingPlanner");
|
||||
Mockito.when(vmProfile.getServiceOffering()).thenReturn(svcOffering);
|
||||
|
||||
DataCenterDeployment plan = new DataCenterDeployment(dataCenterId);
|
||||
Mockito.when(avoids.shouldAvoid((DataCenterVO) Mockito.anyObject())).thenReturn(false);
|
||||
|
||||
Mockito.when(_planner.canHandle(vmProfile, plan, avoids)).thenReturn(false);
|
||||
DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids);
|
||||
assertNull("Planner cannot handle, destination should be null! ", dest);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void emptyClusterListTest() throws InsufficientServerCapacityException, AffinityConflictException {
|
||||
ServiceOfferingVO svcOffering = new ServiceOfferingVO("testOffering", 1, 512, 500, 1, 1, false, false, false,
|
||||
"test dpm", false, false, null, false, VirtualMachine.Type.User, domainId, null, "FirstFitPlanner");
|
||||
Mockito.when(vmProfile.getServiceOffering()).thenReturn(svcOffering);
|
||||
|
||||
DataCenterDeployment plan = new DataCenterDeployment(dataCenterId);
|
||||
Mockito.when(avoids.shouldAvoid((DataCenterVO) Mockito.anyObject())).thenReturn(false);
|
||||
Mockito.when(_planner.canHandle(vmProfile, plan, avoids)).thenReturn(true);
|
||||
|
||||
Mockito.when(((DeploymentClusterPlanner) _planner).orderClusters(vmProfile, plan, avoids)).thenReturn(null);
|
||||
DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids);
|
||||
assertNull("Planner cannot handle, destination should be null! ", dest);
|
||||
}
|
||||
|
||||
|
||||
@Configuration
|
||||
@ComponentScan(basePackageClasses = { DeploymentPlanningManagerImpl.class }, includeFilters = { @Filter(value = TestConfiguration.Library.class, type = FilterType.CUSTOM) }, useDefaultFilters = false)
|
||||
public static class TestConfiguration extends SpringUtils.CloudStackTestConfiguration {
|
||||
|
||||
@Bean
|
||||
public FirstFitPlanner firstFitPlanner() {
|
||||
return Mockito.mock(FirstFitPlanner.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public DeploymentPlanner deploymentPlanner() {
|
||||
return Mockito.mock(DeploymentPlanner.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public DataCenterVO dataCenter() {
|
||||
return Mockito.mock(DataCenterVO.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public ExcludeList excludeList() {
|
||||
return Mockito.mock(ExcludeList.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public VirtualMachineProfileImpl virtualMachineProfileImpl() {
|
||||
return Mockito.mock(VirtualMachineProfileImpl.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public ClusterDetailsDao clusterDetailsDao() {
|
||||
return Mockito.mock(ClusterDetailsDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public DataStoreManager cataStoreManager() {
|
||||
return Mockito.mock(DataStoreManager.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public StorageManager storageManager() {
|
||||
return Mockito.mock(StorageManager.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public HostDao hostDao() {
|
||||
return Mockito.mock(HostDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public HostPodDao hostPodDao() {
|
||||
return Mockito.mock(HostPodDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public ClusterDao clusterDao() {
|
||||
return Mockito.mock(ClusterDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public GuestOSDao guestOSDao() {
|
||||
return Mockito.mock(GuestOSDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public GuestOSCategoryDao guestOSCategoryDao() {
|
||||
return Mockito.mock(GuestOSCategoryDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public CapacityManager capacityManager() {
|
||||
return Mockito.mock(CapacityManager.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public StoragePoolHostDao storagePoolHostDao() {
|
||||
return Mockito.mock(StoragePoolHostDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public VolumeDao volumeDao() {
|
||||
return Mockito.mock(VolumeDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public ConfigurationDao configurationDao() {
|
||||
return Mockito.mock(ConfigurationDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public DiskOfferingDao diskOfferingDao() {
|
||||
return Mockito.mock(DiskOfferingDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public PrimaryDataStoreDao primaryDataStoreDao() {
|
||||
return Mockito.mock(PrimaryDataStoreDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public CapacityDao capacityDao() {
|
||||
return Mockito.mock(CapacityDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public PlannerHostReservationDao plannerHostReservationDao() {
|
||||
return Mockito.mock(PlannerHostReservationDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public AffinityGroupProcessor affinityGroupProcessor() {
|
||||
return Mockito.mock(AffinityGroupProcessor.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public AffinityGroupDao affinityGroupDao() {
|
||||
return Mockito.mock(AffinityGroupDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public AffinityGroupVMMapDao affinityGroupVMMapDao() {
|
||||
return Mockito.mock(AffinityGroupVMMapDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public AccountManager accountManager() {
|
||||
return Mockito.mock(AccountManager.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public AgentManager agentManager() {
|
||||
return Mockito.mock(AgentManager.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public MessageBus messageBus() {
|
||||
return Mockito.mock(MessageBus.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public UserVmDao userVMDao() {
|
||||
return Mockito.mock(UserVmDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public VMInstanceDao vmInstanceDao() {
|
||||
return Mockito.mock(VMInstanceDao.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public DataCenterDao dataCenterDao() {
|
||||
return Mockito.mock(DataCenterDao.class);
|
||||
}
|
||||
|
||||
public static class Library implements TypeFilter {
|
||||
|
||||
@Override
|
||||
public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException {
|
||||
ComponentScan cs = TestConfiguration.class.getAnnotation(ComponentScan.class);
|
||||
return SpringUtils.includedInBasePackageClasses(mdr.getClassMetadata().getClassName(), cs);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -409,8 +409,8 @@ public class MockUserVmManagerImpl extends ManagerBase implements UserVmManager,
|
|||
}
|
||||
|
||||
@Override
|
||||
public boolean upgradeVirtualMachine(ScaleVMCmd scaleVMCmd) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException {
|
||||
return false; //To change body of implemented methods use File | Settings | File Templates.
|
||||
public UserVm upgradeVirtualMachine(ScaleVMCmd scaleVMCmd) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException {
|
||||
return null; //To change body of implemented methods use File | Settings | File Templates.
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -420,6 +420,11 @@ public class MockUserVmManagerImpl extends ManagerBase implements UserVmManager,
|
|||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean upgradeVirtualMachine(Long id, Long serviceOfferingId) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException {
|
||||
return false; //To change body of implemented methods use File | Settings | File Templates.
|
||||
}
|
||||
|
||||
@Override
|
||||
public void prepareStop(VirtualMachineProfile<UserVmVO> profile) {
|
||||
// TODO Auto-generated method stub
|
||||
|
|
|
|||
|
|
@ -431,7 +431,7 @@ public class MockConfigurationManagerImpl extends ManagerBase implements Configu
|
|||
*/
|
||||
@Override
|
||||
public ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, Type vm_typeType, String name, int cpu, int ramSize, int speed, String displayText, boolean localStorageRequired, boolean offerHA,
|
||||
boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate) {
|
||||
boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate, String deploymentPlanner, Map<String, String> details) {
|
||||
// TODO Auto-generated method stub
|
||||
return null;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,14 +19,8 @@ package org.apache.cloudstack.networkoffering;
|
|||
|
||||
import java.io.IOException;
|
||||
|
||||
import com.cloud.dc.ClusterDetailsDao;
|
||||
import com.cloud.dc.dao.*;
|
||||
import com.cloud.server.ConfigurationServer;
|
||||
import com.cloud.user.*;
|
||||
import org.apache.cloudstack.acl.SecurityChecker;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDaoImpl;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
|
||||
import org.apache.cloudstack.test.utils.SpringUtils;
|
||||
import org.mockito.Mockito;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
|
|
@ -44,6 +38,18 @@ import com.cloud.api.query.dao.UserAccountJoinDaoImpl;
|
|||
import com.cloud.capacity.dao.CapacityDaoImpl;
|
||||
import com.cloud.cluster.agentlb.dao.HostTransferMapDaoImpl;
|
||||
import com.cloud.configuration.dao.ConfigurationDao;
|
||||
import com.cloud.dc.ClusterDetailsDao;
|
||||
import com.cloud.dc.dao.AccountVlanMapDaoImpl;
|
||||
import com.cloud.dc.dao.ClusterDaoImpl;
|
||||
import com.cloud.dc.dao.DataCenterDaoImpl;
|
||||
import com.cloud.dc.dao.DataCenterIpAddressDaoImpl;
|
||||
import com.cloud.dc.dao.DataCenterLinkLocalIpAddressDao;
|
||||
import com.cloud.dc.dao.DataCenterVnetDaoImpl;
|
||||
import com.cloud.dc.dao.DcDetailsDaoImpl;
|
||||
import com.cloud.dc.dao.HostPodDaoImpl;
|
||||
import com.cloud.dc.dao.PodVlanDaoImpl;
|
||||
import com.cloud.dc.dao.PodVlanMapDaoImpl;
|
||||
import com.cloud.dc.dao.VlanDaoImpl;
|
||||
import com.cloud.domain.dao.DomainDaoImpl;
|
||||
import com.cloud.event.dao.UsageEventDaoImpl;
|
||||
import com.cloud.host.dao.HostDaoImpl;
|
||||
|
|
@ -80,9 +86,11 @@ import com.cloud.network.vpc.dao.PrivateIpDaoImpl;
|
|||
import com.cloud.network.vpn.RemoteAccessVpnService;
|
||||
import com.cloud.offerings.dao.NetworkOfferingDao;
|
||||
import com.cloud.offerings.dao.NetworkOfferingServiceMapDao;
|
||||
import com.cloud.offerings.dao.NetworkOfferingServiceMapDaoImpl;
|
||||
import com.cloud.projects.ProjectManager;
|
||||
import com.cloud.server.ConfigurationServer;
|
||||
import com.cloud.server.ManagementService;
|
||||
import com.cloud.service.dao.ServiceOfferingDaoImpl;
|
||||
import com.cloud.service.dao.ServiceOfferingDetailsDaoImpl;
|
||||
import com.cloud.storage.dao.DiskOfferingDaoImpl;
|
||||
import com.cloud.storage.dao.S3DaoImpl;
|
||||
import com.cloud.storage.dao.SnapshotDaoImpl;
|
||||
|
|
@ -93,6 +101,11 @@ import com.cloud.storage.s3.S3Manager;
|
|||
import com.cloud.storage.secondary.SecondaryStorageVmManager;
|
||||
import com.cloud.storage.swift.SwiftManager;
|
||||
import com.cloud.tags.dao.ResourceTagsDaoImpl;
|
||||
import com.cloud.user.AccountDetailsDao;
|
||||
import com.cloud.user.AccountManager;
|
||||
import com.cloud.user.ResourceLimitService;
|
||||
import com.cloud.user.UserContext;
|
||||
import com.cloud.user.UserContextInitializer;
|
||||
import com.cloud.user.dao.AccountDaoImpl;
|
||||
import com.cloud.user.dao.UserDaoImpl;
|
||||
import com.cloud.vm.dao.InstanceGroupDaoImpl;
|
||||
|
|
@ -109,6 +122,7 @@ import com.cloud.vm.dao.VMInstanceDaoImpl;
|
|||
DomainDaoImpl.class,
|
||||
SwiftDaoImpl.class,
|
||||
ServiceOfferingDaoImpl.class,
|
||||
ServiceOfferingDetailsDaoImpl.class,
|
||||
VlanDaoImpl.class,
|
||||
IPAddressDaoImpl.class,
|
||||
ResourceTagsDaoImpl.class,
|
||||
|
|
@ -155,162 +169,167 @@ useDefaultFilters=false
|
|||
)
|
||||
|
||||
public class ChildTestConfiguration {
|
||||
|
||||
|
||||
@Bean
|
||||
public ManagementService managementService() {
|
||||
return Mockito.mock(ManagementService.class);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public AccountManager acctMgr() {
|
||||
return Mockito.mock(AccountManager.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public NetworkService ntwkSvc() {
|
||||
return Mockito.mock(NetworkService.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public NetworkModel ntwkMdl() {
|
||||
return Mockito.mock(NetworkModel.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public AlertManager alertMgr() {
|
||||
return Mockito.mock(AlertManager.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public SecurityChecker securityChkr() {
|
||||
return Mockito.mock(SecurityChecker.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public ResourceLimitService resourceSvc() {
|
||||
return Mockito.mock(ResourceLimitService.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public ProjectManager projectMgr() {
|
||||
return Mockito.mock(ProjectManager.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public SecondaryStorageVmManager ssvmMgr() {
|
||||
return Mockito.mock(SecondaryStorageVmManager.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public SwiftManager swiftMgr() {
|
||||
return Mockito.mock(SwiftManager.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public S3Manager s3Mgr() {
|
||||
return Mockito.mock(S3Manager.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public VpcManager vpcMgr() {
|
||||
return Mockito.mock(VpcManager.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public UserVmDao userVMDao() {
|
||||
return Mockito.mock(UserVmDao.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public RulesManager rulesMgr() {
|
||||
return Mockito.mock(RulesManager.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public LoadBalancingRulesManager lbRulesMgr() {
|
||||
return Mockito.mock(LoadBalancingRulesManager.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public RemoteAccessVpnService vpnMgr() {
|
||||
return Mockito.mock(RemoteAccessVpnService.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public NetworkGuru ntwkGuru() {
|
||||
return Mockito.mock(NetworkGuru.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public NetworkElement ntwkElement() {
|
||||
return Mockito.mock(NetworkElement.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public IpDeployer ipDeployer() {
|
||||
return Mockito.mock(IpDeployer.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public DhcpServiceProvider dhcpProvider() {
|
||||
return Mockito.mock(DhcpServiceProvider.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public FirewallManager firewallMgr() {
|
||||
return Mockito.mock(FirewallManager.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public AgentManager agentMgr() {
|
||||
return Mockito.mock(AgentManager.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public StorageNetworkManager storageNtwkMgr() {
|
||||
return Mockito.mock(StorageNetworkManager.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public NetworkACLManager ntwkAclMgr() {
|
||||
return Mockito.mock(NetworkACLManager.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public Ipv6AddressManager ipv6Mgr() {
|
||||
return Mockito.mock(Ipv6AddressManager.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public ConfigurationDao configDao() {
|
||||
return Mockito.mock(ConfigurationDao.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public UserContext userContext() {
|
||||
return Mockito.mock(UserContext.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public UserContextInitializer userContextInitializer() {
|
||||
return Mockito.mock(UserContextInitializer.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public NetworkManager networkManager() {
|
||||
return Mockito.mock(NetworkManager.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public NetworkOfferingDao networkOfferingDao() {
|
||||
return Mockito.mock(NetworkOfferingDao.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public NetworkDao networkDao() {
|
||||
return Mockito.mock(NetworkDao.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public NetworkOfferingServiceMapDao networkOfferingServiceMapDao() {
|
||||
return Mockito.mock(NetworkOfferingServiceMapDao.class);
|
||||
}
|
||||
|
||||
|
||||
@Bean
|
||||
public DataCenterLinkLocalIpAddressDao datacenterLinkLocalIpAddressDao() {
|
||||
return Mockito.mock(DataCenterLinkLocalIpAddressDao.class);
|
||||
|
|
@ -342,5 +361,5 @@ public class ChildTestConfiguration {
|
|||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@
|
|||
#set -x
|
||||
|
||||
usage() {
|
||||
printf "\nThe tool stopping/starting running system vms and domain routers \n\nUsage: %s: [-d] [-u] [-p] [-m] [-s] [-r] [-a] [-t] [-e]\n\n -d - cloud DB server ip address, defaulted to localhost if not specified \n -u - user name to access cloud DB, defaulted to "root" if not specified \n -p - cloud DB user password, defaulted to no password if not specified \n\n -m - the ip address of management server, defaulted to localhost if not specified\n\n -s - stop then start all running SSVMs and Console Proxies \n -r - stop then start all running Virtual Routers\n -a - stop then start all running SSVMs, Console Proxies, and Virtual Routers \n -e - restart all Guest networks \n -t - number of parallel threads used for stopping Domain Routers. Default is 10.\n -l - log file location. Default is cloud.log under current directory.\n\n" $(basename $0) >&2
|
||||
printf "\nThe tool stopping/starting running system vms and domain routers \n\nUsage: %s: [-d] [-u] [-p] [-m] [-s] [-r] [-a] [-t] [-n] [-z]\n\n -d - cloud DB server ip address, defaulted to localhost if not specified \n -u - user name to access cloud DB, defaulted to "root" if not specified \n -p - cloud DB user password, defaulted to no password if not specified \n\n -m - the ip address of management server, defaulted to localhost if not specified\n\n -s - stop then start all running SSVMs and Console Proxies \n -r - stop then start all running Virtual Routers\n -a - stop then start all running SSVMs, Console Proxies, and Virtual Routers \n -n - restart all Guest networks \n -t - number of parallel threads used for stopping Domain Routers. Default is 10.\n -l - log file location. Default is cloud.log under current directory.\n -z - do restart only for the instances in the specific zone. If not specified, restart will apply to instances in all zones\n\n" $(basename $0) >&2
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -37,9 +37,12 @@ password=
|
|||
help=
|
||||
maxthreads=10
|
||||
LOGFILE=cloud.log
|
||||
zone=""
|
||||
inzone=""
|
||||
|
||||
|
||||
while getopts 'sarhnd:m:u:p:t:l:' OPTION
|
||||
|
||||
while getopts 'sarhnd:m:u:p:t:l:z:' OPTION
|
||||
do
|
||||
case $OPTION in
|
||||
s) system=1
|
||||
|
|
@ -63,6 +66,9 @@ do
|
|||
t) maxthreads="$OPTARG"
|
||||
;;
|
||||
l) LOGFILE="$OPTARG"
|
||||
;;
|
||||
z) zone=" AND data_center_id=""$OPTARG"
|
||||
inzone=" in zone id=""$OPTARG"
|
||||
esac
|
||||
done
|
||||
|
||||
|
|
@ -70,14 +76,14 @@ done
|
|||
|
||||
|
||||
stop_start_system() {
|
||||
secondary=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vm_instance where state=\"Running\" and type=\"SecondaryStorageVm\""`)
|
||||
console=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vm_instance where state=\"Running\" and type=\"ConsoleProxy\""`)
|
||||
secondary=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vm_instance where state=\"Running\" and type=\"SecondaryStorageVm\"$zone"`)
|
||||
console=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vm_instance where state=\"Running\" and type=\"ConsoleProxy\"$zone"`)
|
||||
length_secondary=(${#secondary[@]})
|
||||
length_console=(${#console[@]})
|
||||
|
||||
|
||||
echo -e "\nStopping and starting $length_secondary secondary storage vm(s)..."
|
||||
echo -e "Stopping and starting $length_secondary secondary storage vm(s)..." >>$LOGFILE
|
||||
echo -e "\nStopping and starting $length_secondary secondary storage vm(s)$inzone..."
|
||||
echo -e "Stopping and starting $length_secondary secondary storage vm(s)$inzone..." >>$LOGFILE
|
||||
|
||||
for d in "${secondary[@]}"; do
|
||||
echo "INFO: Stopping secondary storage vm with id $d" >>$LOGFILE
|
||||
|
|
@ -98,12 +104,12 @@ done
|
|||
if [ "$length_secondary" == "0" ];then
|
||||
echo -e "No running secondary storage vms found \n"
|
||||
else
|
||||
echo -e "Done stopping and starting secondary storage vm(s)"
|
||||
echo -e "Done stopping and starting secondary storage vm(s)." >>$LOGFILE
|
||||
echo -e "Done stopping and starting secondary storage vm(s)$inzone"
|
||||
echo -e "Done stopping and starting secondary storage vm(s)$inzone." >>$LOGFILE
|
||||
fi
|
||||
|
||||
echo -e "\nStopping and starting $length_console console proxy vm(s)..."
|
||||
echo -e "Stopping and starting $length_console console proxy vm(s)..." >>$LOGFILE
|
||||
echo -e "\nStopping and starting $length_console console proxy vm(s)$inzone..."
|
||||
echo -e "Stopping and starting $length_console console proxy vm(s)$inzone..." >>$LOGFILE
|
||||
|
||||
for d in "${console[@]}"; do
|
||||
echo "INFO: Stopping console proxy with id $d" >>$LOGFILE
|
||||
|
|
@ -124,17 +130,17 @@ done
|
|||
if [ "$length_console" == "0" ];then
|
||||
echo -e "No running console proxy vms found \n"
|
||||
else
|
||||
echo "Done stopping and starting console proxy vm(s)."
|
||||
echo "Done stopping and starting console proxy vm(s)." >>$LOGFILE
|
||||
echo "Done stopping and starting console proxy vm(s) $inzone."
|
||||
echo "Done stopping and starting console proxy vm(s) $inzone." >>$LOGFILE
|
||||
fi
|
||||
}
|
||||
|
||||
stop_start_router() {
|
||||
router=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vm_instance where state=\"Running\" and type=\"DomainRouter\""`)
|
||||
router=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vm_instance where state=\"Running\" and type=\"DomainRouter\"$zone"`)
|
||||
length_router=(${#router[@]})
|
||||
|
||||
echo -e "\nStopping and starting $length_router running routing vm(s)... "
|
||||
echo -e "Stopping and starting $length_router running routing vm(s)... " >>$LOGFILE
|
||||
echo -e "\nStopping and starting $length_router running routing vm(s)$inzone... "
|
||||
echo -e "Stopping and starting $length_router running routing vm(s)$inzone... " >>$LOGFILE
|
||||
|
||||
#Spawn reboot router in parallel - run commands in <n> chunks - number of threads is configurable
|
||||
|
||||
|
|
@ -185,8 +191,8 @@ stop_start_router() {
|
|||
sleep 10
|
||||
done
|
||||
|
||||
echo -e "Done restarting router(s). \n"
|
||||
echo -e "Done restarting router(s). \n" >>$LOGFILE
|
||||
echo -e "Done restarting router(s)$inzone. \n"
|
||||
echo -e "Done restarting router(s)$inzone. \n" >>$LOGFILE
|
||||
|
||||
fi
|
||||
}
|
||||
|
|
@ -231,11 +237,11 @@ reboot_router(){
|
|||
|
||||
restart_networks(){
|
||||
networks=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select n.id
|
||||
from networks n, network_offerings no where n.network_offering_id = no.id and no.system_only = 0 and n.removed is null"`)
|
||||
from networks n, network_offerings no where n.network_offering_id = no.id and no.system_only = 0 and n.removed is null$zone"`)
|
||||
length_networks=(${#networks[@]})
|
||||
|
||||
echo -e "\nRestarting networks... "
|
||||
echo -e "Restarting networks... " >>$LOGFILE
|
||||
echo -e "\nRestarting $length_networks networks$inzone... "
|
||||
echo -e "Restarting $length_networks networks$inzone... " >>$LOGFILE
|
||||
|
||||
#Spawn restart network in parallel - run commands in <n> chunks - number of threads is configurable
|
||||
|
||||
|
|
@ -287,8 +293,8 @@ restart_networks(){
|
|||
sleep 10
|
||||
done
|
||||
|
||||
echo -e "Done restarting networks. \n"
|
||||
echo -e "Done restarting networks. \n" >>$LOGFILE
|
||||
echo -e "Done restarting networks$inzone. \n"
|
||||
echo -e "Done restarting networks$inzone. \n" >>$LOGFILE
|
||||
|
||||
fi
|
||||
}
|
||||
|
|
|
|||
|
|
@ -393,7 +393,16 @@ CREATE TABLE `cloud`.`vm_snapshots` (
|
|||
ALTER TABLE `cloud`.`hypervisor_capabilities` ADD COLUMN `vm_snapshot_enabled` tinyint(1) DEFAULT 0 NOT NULL COMMENT 'Whether VM snapshot is supported by hypervisor';
|
||||
UPDATE `cloud`.`hypervisor_capabilities` SET `vm_snapshot_enabled`=1 WHERE `hypervisor_type` in ('VMware', 'XenServer');
|
||||
|
||||
|
||||
CREATE TABLE `cloud`.`service_offering_details` (
|
||||
`id` bigint unsigned NOT NULL auto_increment,
|
||||
`service_offering_id` bigint unsigned NOT NULL COMMENT 'service offering id',
|
||||
`name` varchar(255) NOT NULL,
|
||||
`value` varchar(255) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
CONSTRAINT `fk_service_offering_details__service_offering_id` FOREIGN KEY (`service_offering_id`) REFERENCES `service_offering`(`id`) ON DELETE CASCADE,
|
||||
CONSTRAINT UNIQUE KEY `uk_service_offering_id_name` (`service_offering_id`, `name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
DROP VIEW IF EXISTS `cloud`.`user_vm_view`;
|
||||
CREATE VIEW `cloud`.`user_vm_view` AS
|
||||
select
|
||||
|
|
@ -973,9 +982,61 @@ CREATE TABLE `cloud`.`network_asa1000v_map` (
|
|||
|
||||
ALTER TABLE `cloud`.`network_offerings` ADD COLUMN `eip_associate_public_ip` int(1) unsigned NOT NULL DEFAULT 0 COMMENT 'true if public IP is associated with user VM creation by default when EIP service is enabled.' AFTER `elastic_ip_service`;
|
||||
|
||||
-- Re-enable foreign key checking, at the end of the upgrade path
|
||||
SET foreign_key_checks = 1;
|
||||
|
||||
CREATE TABLE `cloud`.`op_host_planner_reservation` (
|
||||
`id` bigint unsigned NOT NULL auto_increment,
|
||||
`data_center_id` bigint unsigned NOT NULL,
|
||||
`pod_id` bigint unsigned,
|
||||
`cluster_id` bigint unsigned,
|
||||
`host_id` bigint unsigned,
|
||||
`resource_usage` varchar(255) COMMENT 'Shared(between planners) Vs Dedicated (exclusive usage to a planner)',
|
||||
PRIMARY KEY (`id`),
|
||||
INDEX `i_op_host_planner_reservation__host_resource_usage`(`host_id`, `resource_usage`),
|
||||
CONSTRAINT `fk_planner_reservation__host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE,
|
||||
CONSTRAINT `fk_planner_reservation__data_center_id` FOREIGN KEY (`data_center_id`) REFERENCES `cloud`.`data_center`(`id`) ON DELETE CASCADE,
|
||||
CONSTRAINT `fk_planner_reservation__pod_id` FOREIGN KEY (`pod_id`) REFERENCES `cloud`.`host_pod_ref`(`id`) ON DELETE CASCADE,
|
||||
CONSTRAINT `fk_planner_reservation__cluster_id` FOREIGN KEY (`cluster_id`) REFERENCES `cloud`.`cluster`(`id`) ON DELETE CASCADE
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
ALTER TABLE `cloud`.`service_offering` ADD COLUMN `deployment_planner` varchar(255) COMMENT 'Planner heuristics used to deploy a VM of this offering; if null global config vm.deployment.planner is used';
|
||||
|
||||
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'vm.deployment.planner', 'FirstFitPlanner', '[''FirstFitPlanner'', ''UserDispersingPlanner'', ''UserConcentratedPodPlanner'']: DeploymentPlanner heuristic that will be used for VM deployment.');
|
||||
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'host.reservation.release.period', '300000', 'The interval in milliseconds between host reservation release checks');
|
||||
|
||||
DROP VIEW IF EXISTS `cloud`.`service_offering_view`;
|
||||
CREATE VIEW `cloud`.`service_offering_view` AS
|
||||
select
|
||||
service_offering.id,
|
||||
disk_offering.uuid,
|
||||
disk_offering.name,
|
||||
disk_offering.display_text,
|
||||
disk_offering.created,
|
||||
disk_offering.tags,
|
||||
disk_offering.removed,
|
||||
disk_offering.use_local_storage,
|
||||
disk_offering.system_use,
|
||||
service_offering.cpu,
|
||||
service_offering.speed,
|
||||
service_offering.ram_size,
|
||||
service_offering.nw_rate,
|
||||
service_offering.mc_rate,
|
||||
service_offering.ha_enabled,
|
||||
service_offering.limit_cpu_use,
|
||||
service_offering.host_tag,
|
||||
service_offering.default_use,
|
||||
service_offering.vm_type,
|
||||
service_offering.sort_key,
|
||||
service_offering.deployment_planner,
|
||||
domain.id domain_id,
|
||||
domain.uuid domain_uuid,
|
||||
domain.name domain_name,
|
||||
domain.path domain_path
|
||||
from
|
||||
`cloud`.`service_offering`
|
||||
inner join
|
||||
`cloud`.`disk_offering` ON service_offering.id = disk_offering.id
|
||||
left join
|
||||
`cloud`.`domain` ON disk_offering.domain_id = domain.id;
|
||||
|
||||
-- Add "default" field to account/user tables
|
||||
ALTER TABLE `cloud`.`account` ADD COLUMN `default` int(1) unsigned NOT NULL DEFAULT '0' COMMENT '1 if account is default';
|
||||
|
|
@ -1605,3 +1666,8 @@ CREATE TABLE `cloud`.`nic_ip_alias` (
|
|||
|
||||
alter table `cloud`.`vpc_gateways` add column network_acl_id bigint unsigned default 1 NOT NULL;
|
||||
update `cloud`.`vpc_gateways` set network_acl_id = 2;
|
||||
|
||||
-- Re-enable foreign key checking, at the end of the upgrade path
|
||||
SET foreign_key_checks = 1;
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,232 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
""" P1 tests for Storage motion
|
||||
"""
|
||||
#Import Local Modules
|
||||
import marvin
|
||||
from marvin.cloudstackTestCase import *
|
||||
from marvin.cloudstackAPI import *
|
||||
from marvin.remoteSSHClient import remoteSSHClient
|
||||
from marvin.integration.lib.utils import *
|
||||
from marvin.integration.lib.base import *
|
||||
from marvin.integration.lib.common import *
|
||||
from nose.plugins.attrib import attr
|
||||
#Import System modules
|
||||
import time
|
||||
|
||||
_multiprocess_shared_ = True
|
||||
class Services:
|
||||
"""Test VM Life Cycle Services
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.services = {
|
||||
"disk_offering":{
|
||||
"displaytext": "Small",
|
||||
"name": "Small",
|
||||
"disksize": 1
|
||||
},
|
||||
"account": {
|
||||
"email": "test@test.com",
|
||||
"firstname": "Test",
|
||||
"lastname": "User",
|
||||
"username": "test",
|
||||
# Random characters are appended in create account to
|
||||
# ensure unique username generated each time
|
||||
"password": "password",
|
||||
},
|
||||
"small":
|
||||
# Create a small virtual machine instance with disk offering
|
||||
{
|
||||
"displayname": "testserver",
|
||||
"username": "root", # VM creds for SSH
|
||||
"password": "password",
|
||||
"ssh_port": 22,
|
||||
"hypervisor": 'XenServer',
|
||||
"privateport": 22,
|
||||
"publicport": 22,
|
||||
"protocol": 'TCP',
|
||||
},
|
||||
"service_offerings":
|
||||
{
|
||||
"implicitplanner":
|
||||
{
|
||||
# Small service offering ID to for change VM
|
||||
# service offering from medium to small
|
||||
"name": "Implicit Strict",
|
||||
"displaytext": "Implicit Strict",
|
||||
"cpunumber": 1,
|
||||
"cpuspeed": 500,
|
||||
"memory": 512,
|
||||
"deploymentplanner": "ImplicitDedicationPlanner"
|
||||
}
|
||||
},
|
||||
"template": {
|
||||
"displaytext": "Cent OS Template",
|
||||
"name": "Cent OS Template",
|
||||
"passwordenabled": True,
|
||||
},
|
||||
"diskdevice": '/dev/xvdd',
|
||||
# Disk device where ISO is attached to instance
|
||||
"mount_dir": "/mnt/tmp",
|
||||
"sleep": 60,
|
||||
"timeout": 10,
|
||||
#Migrate VM to hostid
|
||||
"ostype": 'CentOS 5.3 (64-bit)',
|
||||
# CentOS 5.3 (64-bit)
|
||||
}
|
||||
|
||||
class TestImplicitPlanner(cloudstackTestCase):
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.api_client = super(TestImplicitPlanner, cls).getClsTestClient().getApiClient()
|
||||
cls.services = Services().services
|
||||
|
||||
# Get Zone, Domain and templates
|
||||
domain = get_domain(cls.api_client, cls.services)
|
||||
cls.zone = get_zone(cls.api_client, cls.services)
|
||||
cls.services['mode'] = cls.zone.networktype
|
||||
|
||||
template = get_template(
|
||||
cls.api_client,
|
||||
cls.zone.id,
|
||||
cls.services["ostype"]
|
||||
)
|
||||
# Set Zones and disk offerings
|
||||
cls.services["small"]["zoneid"] = cls.zone.id
|
||||
cls.services["small"]["template"] = template.id
|
||||
|
||||
# Create VMs, NAT Rules etc
|
||||
cls.account = Account.create(
|
||||
cls.api_client,
|
||||
cls.services["account"],
|
||||
domainid=domain.id
|
||||
)
|
||||
|
||||
cls.small_offering = ServiceOffering.create(
|
||||
cls.api_client,
|
||||
cls.services["service_offerings"]["implicitplanner"]
|
||||
)
|
||||
|
||||
cls._cleanup = [
|
||||
cls.small_offering,
|
||||
cls.account
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
cls.api_client = super(TestImplicitPlanner, cls).getClsTestClient().getApiClient()
|
||||
cleanup_resources(cls.api_client, cls._cleanup)
|
||||
return
|
||||
|
||||
def setUp(self):
|
||||
self.apiclient = self.testClient.getApiClient()
|
||||
self.dbclient = self.testClient.getDbConnection()
|
||||
self.cleanup = []
|
||||
|
||||
def tearDown(self):
|
||||
#Clean up, terminate the created ISOs
|
||||
cleanup_resources(self.apiclient, self.cleanup)
|
||||
return
|
||||
|
||||
# This test requires multi host and at least one host which is empty (no vms should
|
||||
# be running on that host). It uses an implicit planner to deploy instances and the
|
||||
# instances of a new account should go to an host that doesn't have vms of any other
|
||||
# account.
|
||||
@attr(tags = ["advanced", "basic", "multihosts", "implicitplanner"])
|
||||
def test_01_deploy_vm_with_implicit_planner(self):
|
||||
"""Test implicit planner is placing vms of an account on implicitly dedicated hosts.
|
||||
"""
|
||||
# Validate the following
|
||||
# 1. Deploy a vm using implicit planner. It should go on to a
|
||||
# host that is empty (not running vms of any other account)
|
||||
# 2. Deploy another vm it should get deployed on the same host.
|
||||
|
||||
#create a virtual machine
|
||||
virtual_machine_1 = VirtualMachine.create(
|
||||
self.api_client,
|
||||
self.services["small"],
|
||||
accountid=self.account.name,
|
||||
domainid=self.account.domainid,
|
||||
serviceofferingid=self.small_offering.id,
|
||||
mode=self.services["mode"]
|
||||
)
|
||||
|
||||
list_vm_response_1 = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=virtual_machine_1.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_vm_response_1, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
list_vm_response_1,
|
||||
None,
|
||||
"Check virtual machine is listVirtualMachines"
|
||||
)
|
||||
|
||||
vm_response_1 = list_vm_response_1[0]
|
||||
|
||||
self.assertEqual(
|
||||
vm_response_1.id,
|
||||
virtual_machine_1.id,
|
||||
"Check virtual machine ID of VM"
|
||||
)
|
||||
|
||||
virtual_machine_2 = VirtualMachine.create(
|
||||
self.api_client,
|
||||
self.services["small"],
|
||||
accountid=self.account.name,
|
||||
domainid=self.account.domainid,
|
||||
serviceofferingid=self.small_offering.id,
|
||||
mode=self.services["mode"]
|
||||
)
|
||||
|
||||
list_vm_response_2 = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=virtual_machine_2.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_vm_response_2, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
list_vm_response_2,
|
||||
None,
|
||||
"Check virtual machine is listVirtualMachines"
|
||||
)
|
||||
|
||||
vm_response_2 = list_vm_response_2[0]
|
||||
|
||||
self.assertEqual(
|
||||
vm_response_2.id,
|
||||
virtual_machine_2.id,
|
||||
"Check virtual machine ID of VM"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
vm_response_1.hostid,
|
||||
vm_response_2.hostid,
|
||||
"Check both vms have the same host id"
|
||||
)
|
||||
return
|
||||
|
|
@ -0,0 +1,164 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
#!/usr/bin/env python
|
||||
|
||||
import marvin
|
||||
from marvin import cloudstackTestCase
|
||||
from marvin.cloudstackTestCase import *
|
||||
|
||||
import unittest
|
||||
import hashlib
|
||||
import random
|
||||
|
||||
class TestDeployVmWithVariedPlanners(cloudstackTestCase):
|
||||
"""
|
||||
This test tests that we can create serviceOfferings with different deployment Planners and deploy virtual machines into a user account
|
||||
using these service offerings and builtin template
|
||||
"""
|
||||
def setUp(self):
|
||||
"""
|
||||
CloudStack internally saves its passwords in md5 form and that is how we
|
||||
specify it in the API. Python's hashlib library helps us to quickly hash
|
||||
strings as follows
|
||||
"""
|
||||
mdf = hashlib.md5()
|
||||
mdf.update('password')
|
||||
mdf_pass = mdf.hexdigest()
|
||||
|
||||
self.apiClient = self.testClient.getApiClient() #Get ourselves an API client
|
||||
|
||||
self.acct = createAccount.createAccountCmd() #The createAccount command
|
||||
self.acct.accounttype = 0 #We need a regular user. admins have accounttype=1
|
||||
self.acct.firstname = 'test'
|
||||
self.acct.lastname = 'user' #What's up doc?
|
||||
self.acct.username = 'testuser'
|
||||
self.acct.password = mdf_pass #The md5 hashed password string
|
||||
self.acct.email = 'test@domain.com'
|
||||
self.acct.account = 'testacct'
|
||||
self.acct.domainid = 1 #The default ROOT domain
|
||||
self.acctResponse = self.apiClient.createAccount(self.acct)
|
||||
# And upon successful creation we'll log a helpful message in our logs
|
||||
# using the default debug logger of the test framework
|
||||
self.debug("successfully created account: %s, id: \
|
||||
%s"%(self.acctResponse.name, \
|
||||
self.acctResponse.id))
|
||||
|
||||
#Create service offerings with varied planners
|
||||
self.svcOfferingFirstFit = createServiceOffering.createServiceOfferingCmd()
|
||||
self.svcOfferingFirstFit.name = 'Tiny Instance FirstFit'
|
||||
self.svcOfferingFirstFit.displaytext = 'Tiny Instance with FirstFitPlanner'
|
||||
self.svcOfferingFirstFit.cpuspeed = 100
|
||||
self.svcOfferingFirstFit.cpunumber = 1
|
||||
self.svcOfferingFirstFit.memory = 256
|
||||
self.svcOfferingFirstFit.deploymentplanner = 'FirstFitPlanner'
|
||||
self.svcOfferingFirstFitResponse = self.apiClient.createServiceOffering(self.svcOfferingFirstFit)
|
||||
|
||||
self.debug("successfully created serviceofferring name: %s, id: \
|
||||
%s, deploymentPlanner: %s"%(self.svcOfferingFirstFitResponse.name, \
|
||||
self.svcOfferingFirstFitResponse.id,self.svcOfferingFirstFitResponse.deploymentplanner))
|
||||
|
||||
#Create service offerings with varied planners
|
||||
self.svcOfferingUserDispersing = createServiceOffering.createServiceOfferingCmd()
|
||||
self.svcOfferingUserDispersing.name = 'Tiny Instance UserDispersing'
|
||||
self.svcOfferingUserDispersing.displaytext = 'Tiny Instance with UserDispersingPlanner'
|
||||
self.svcOfferingUserDispersing.cpuspeed = 100
|
||||
self.svcOfferingUserDispersing.cpunumber = 1
|
||||
self.svcOfferingUserDispersing.memory = 256
|
||||
self.svcOfferingUserDispersing.deploymentplanner = 'FirstFitPlanner'
|
||||
self.svcOfferingUserDispersingResponse = self.apiClient.createServiceOffering(self.svcOfferingUserDispersing)
|
||||
|
||||
self.debug("successfully created serviceofferring name: %s, id: \
|
||||
%s, deploymentPlanner: %s"%(self.svcOfferingUserDispersingResponse.name, \
|
||||
self.svcOfferingUserDispersingResponse.id,self.svcOfferingUserDispersingResponse.deploymentplanner))
|
||||
|
||||
def test_DeployVm(self):
|
||||
"""
|
||||
Let's start by defining the attributes of our VM that we will be
|
||||
deploying on CloudStack. We will be assuming a single zone is available
|
||||
and is configured and all templates are Ready
|
||||
|
||||
The hardcoded values are used only for brevity.
|
||||
"""
|
||||
deployVmCmd = deployVirtualMachine.deployVirtualMachineCmd()
|
||||
deployVmCmd.zoneid = 1
|
||||
deployVmCmd.account = self.acct.account
|
||||
deployVmCmd.domainid = self.acct.domainid
|
||||
deployVmCmd.templateid = 5 #For default template- CentOS 5.6(64 bit)
|
||||
deployVmCmd.serviceofferingid = self.svcOfferingFirstFitResponse.id
|
||||
|
||||
deployVmResponse = self.apiClient.deployVirtualMachine(deployVmCmd)
|
||||
self.debug("VM %s was deployed in the job %s"%(deployVmResponse.id, deployVmResponse.jobid))
|
||||
|
||||
# At this point our VM is expected to be Running. Let's find out what
|
||||
# listVirtualMachines tells us about VMs in this account
|
||||
|
||||
listVmCmd = listVirtualMachines.listVirtualMachinesCmd()
|
||||
listVmCmd.id = deployVmResponse.id
|
||||
listVmResponse = self.apiClient.listVirtualMachines(listVmCmd)
|
||||
|
||||
self.assertNotEqual(len(listVmResponse), 0, "Check if the list API \
|
||||
returns a non-empty response")
|
||||
|
||||
vm1 = listVmResponse[0]
|
||||
|
||||
self.assertEqual(vm1.id, deployVmResponse.id, "Check if the VM returned \
|
||||
is the same as the one we deployed")
|
||||
self.assertEqual(vm1.state, "Running", "Check if VM has reached \
|
||||
a state of running")
|
||||
|
||||
|
||||
deployVm2Cmd = deployVirtualMachine.deployVirtualMachineCmd()
|
||||
deployVm2Cmd.zoneid = 1
|
||||
deployVm2Cmd.account = self.acct.account
|
||||
deployVm2Cmd.domainid = self.acct.domainid
|
||||
deployVm2Cmd.templateid = 5 #For default template- CentOS 5.6(64 bit)
|
||||
deployVm2Cmd.serviceofferingid = self.svcOfferingFirstFitResponse.id
|
||||
|
||||
deployVm2Response = self.apiClient.deployVirtualMachine(deployVm2Cmd)
|
||||
self.debug("VM %s was deployed in the job %s"%(deployVm2Response.id, deployVm2Response.jobid))
|
||||
|
||||
# At this point our VM is expected to be Running. Let's find out what
|
||||
# listVirtualMachines tells us about VMs in this account
|
||||
|
||||
listVm2Cmd = listVirtualMachines.listVirtualMachinesCmd()
|
||||
listVm2Cmd.id = deployVm2Response.id
|
||||
listVm2Response = self.apiClient.listVirtualMachines(listVm2Cmd)
|
||||
self.assertNotEqual(len(listVm2Response), 0, "Check if the list API \
|
||||
returns a non-empty response")
|
||||
vm2 = listVm2Response[0]
|
||||
self.assertEqual(vm2.id, deployVm2Response.id, "Check if the VM returned \
|
||||
is the same as the one we deployed")
|
||||
self.assertEqual(vm2.state, "Running", "Check if VM has reached \
|
||||
a state of running")
|
||||
|
||||
|
||||
def tearDown(self): # Teardown will delete the Account as well as the VM once the VM reaches "Running" state
|
||||
"""
|
||||
And finally let us cleanup the resources we created by deleting the
|
||||
account. All good unittests are atomic and rerunnable this way
|
||||
"""
|
||||
deleteAcct = deleteAccount.deleteAccountCmd()
|
||||
deleteAcct.id = self.acctResponse.id
|
||||
self.apiClient.deleteAccount(deleteAcct)
|
||||
deleteSvcOfferingFirstFit = deleteServiceOffering.deleteServiceOfferingCmd()
|
||||
deleteSvcOfferingFirstFit.id = self.svcOfferingFirstFitResponse.id
|
||||
self.apiClient.deleteServiceOffering(deleteSvcOfferingFirstFit);
|
||||
deleteSvcOfferingUserDispersing = deleteServiceOffering.deleteServiceOfferingCmd()
|
||||
deleteSvcOfferingUserDispersing.id = self.svcOfferingUserDispersingResponse.id
|
||||
self.apiClient.deleteServiceOffering(deleteSvcOfferingUserDispersing);
|
||||
|
||||
|
|
@ -141,11 +141,17 @@ class TestRouterServices(cloudstackTestCase):
|
|||
# by checking status of dnsmasq process
|
||||
|
||||
# Find router associated with user account
|
||||
list_router_response = list_routers(
|
||||
self.apiclient,
|
||||
account=self.account.name,
|
||||
domainid=self.account.domainid
|
||||
)
|
||||
if self.zone.networktype == "Basic":
|
||||
list_router_response = list_routers(
|
||||
self.apiclient,
|
||||
listall="true"
|
||||
)
|
||||
else:
|
||||
list_router_response = list_routers(
|
||||
self.apiclient,
|
||||
account=self.account.name,
|
||||
domainid=self.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
|
|
|
|||
|
|
@ -53,19 +53,17 @@ class Services:
|
|||
"displaytext": "Tiny Instance",
|
||||
"cpunumber": 1,
|
||||
"cpuspeed": 100, # in MHz
|
||||
"memory": 128, # In MBs
|
||||
"storagetype": "local"
|
||||
"memory": 260 # In MBs
|
||||
|
||||
},
|
||||
"disk_offering": {
|
||||
"displaytext": "Small",
|
||||
"name": "Small",
|
||||
"storagetype": "local",
|
||||
"disksize": 1
|
||||
},
|
||||
'resized_disk_offering': {
|
||||
"displaytext": "Resized",
|
||||
"name": "Resized",
|
||||
"storagetype": "local",
|
||||
"disksize": 3
|
||||
},
|
||||
"volume_offerings": {
|
||||
|
|
@ -152,7 +150,7 @@ class TestCreateVolume(cloudstackTestCase):
|
|||
self.dbclient = self.testClient.getDbConnection()
|
||||
self.cleanup = []
|
||||
|
||||
@attr(tags = ["advanced", "advancedns", "smoke"])
|
||||
@attr(tags = ["advanced", "advancedns", "smoke", "basic"])
|
||||
def test_01_create_volume(self):
|
||||
"""Test Volume creation for all Disk Offerings (incl. custom)
|
||||
"""
|
||||
|
|
@ -346,8 +344,9 @@ class TestVolumes(cloudstackTestCase):
|
|||
cls.custom_resized_disk_offering,
|
||||
cls.service_offering,
|
||||
cls.disk_offering,
|
||||
cls.volume,
|
||||
cls.account
|
||||
]
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
|
|
@ -359,14 +358,17 @@ class TestVolumes(cloudstackTestCase):
|
|||
def setUp(self):
|
||||
self.apiClient = self.testClient.getApiClient()
|
||||
self.dbclient = self.testClient.getDbConnection()
|
||||
self.attached = False
|
||||
self.cleanup = []
|
||||
|
||||
def tearDown(self):
|
||||
#Clean up, terminate the created volumes
|
||||
if self.attached:
|
||||
self.virtual_machine.detach_volume(self.apiClient, self.volume)
|
||||
cleanup_resources(self.apiClient, self.cleanup)
|
||||
return
|
||||
|
||||
@attr(tags = ["advanced", "advancedns", "smoke"])
|
||||
@attr(tags = ["advanced", "advancedns", "smoke", "basic"])
|
||||
def test_02_attach_volume(self):
|
||||
"""Attach a created Volume to a Running VM
|
||||
"""
|
||||
|
|
@ -381,7 +383,7 @@ class TestVolumes(cloudstackTestCase):
|
|||
self.virtual_machine.id
|
||||
))
|
||||
self.virtual_machine.attach_volume(self.apiClient, self.volume)
|
||||
|
||||
self.attached = True
|
||||
list_volume_response = list_volumes(
|
||||
self.apiClient,
|
||||
id=self.volume.id
|
||||
|
|
@ -412,7 +414,7 @@ class TestVolumes(cloudstackTestCase):
|
|||
(self.virtual_machine.ipaddress, e))
|
||||
return
|
||||
|
||||
@attr(tags = ["advanced", "advancedns", "smoke"])
|
||||
@attr(tags = ["advanced", "advancedns", "smoke", "basic"])
|
||||
def test_03_download_attached_volume(self):
|
||||
"""Download a Volume attached to a VM
|
||||
"""
|
||||
|
|
@ -423,6 +425,8 @@ class TestVolumes(cloudstackTestCase):
|
|||
|
||||
self.debug("Extract attached Volume ID: %s" % self.volume.id)
|
||||
|
||||
self.virtual_machine.attach_volume(self.apiClient, self.volume)
|
||||
self.attached = True
|
||||
cmd = extractVolume.extractVolumeCmd()
|
||||
cmd.id = self.volume.id
|
||||
cmd.mode = "HTTP_DOWNLOAD"
|
||||
|
|
@ -432,7 +436,7 @@ class TestVolumes(cloudstackTestCase):
|
|||
with self.assertRaises(Exception):
|
||||
self.apiClient.extractVolume(cmd)
|
||||
|
||||
@attr(tags = ["advanced", "advancedns", "smoke"])
|
||||
@attr(tags = ["advanced", "advancedns", "smoke", "basic"])
|
||||
def test_04_delete_attached_volume(self):
|
||||
"""Delete a Volume attached to a VM
|
||||
"""
|
||||
|
|
@ -444,19 +448,16 @@ class TestVolumes(cloudstackTestCase):
|
|||
|
||||
self.debug("Trying to delete attached Volume ID: %s" %
|
||||
self.volume.id)
|
||||
|
||||
self.virtual_machine.attach_volume(self.apiClient, self.volume)
|
||||
self.attached = True
|
||||
cmd = deleteVolume.deleteVolumeCmd()
|
||||
cmd.id = self.volume.id
|
||||
#Proper exception should be raised; deleting attach VM is not allowed
|
||||
#with self.assertRaises(Exception):
|
||||
result = self.apiClient.deleteVolume(cmd)
|
||||
self.assertEqual(
|
||||
result,
|
||||
None,
|
||||
"Check for delete download error while volume is attached"
|
||||
)
|
||||
with self.assertRaises(Exception):
|
||||
self.apiClient.deleteVolume(cmd)
|
||||
|
||||
@attr(tags = ["advanced", "advancedns", "smoke"])
|
||||
@attr(tags = ["advanced", "advancedns", "smoke", "basic"])
|
||||
def test_05_detach_volume(self):
|
||||
"""Detach a Volume attached to a VM
|
||||
"""
|
||||
|
|
@ -470,8 +471,9 @@ class TestVolumes(cloudstackTestCase):
|
|||
self.volume.id,
|
||||
self.virtual_machine.id
|
||||
))
|
||||
|
||||
self.virtual_machine.attach_volume(self.apiClient, self.volume)
|
||||
self.virtual_machine.detach_volume(self.apiClient, self.volume)
|
||||
self.attached = False
|
||||
#Sleep to ensure the current state will reflected in other calls
|
||||
time.sleep(self.services["sleep"])
|
||||
list_volume_response = list_volumes(
|
||||
|
|
@ -497,7 +499,7 @@ class TestVolumes(cloudstackTestCase):
|
|||
)
|
||||
return
|
||||
|
||||
@attr(tags = ["advanced", "advancedns", "smoke"])
|
||||
@attr(tags = ["advanced", "advancedns", "smoke", "basic"])
|
||||
def test_06_download_detached_volume(self):
|
||||
"""Download a Volume unattached to an VM
|
||||
"""
|
||||
|
|
@ -506,6 +508,10 @@ class TestVolumes(cloudstackTestCase):
|
|||
|
||||
self.debug("Extract detached Volume ID: %s" % self.volume.id)
|
||||
|
||||
self.virtual_machine.attach_volume(self.apiClient, self.volume)
|
||||
self.virtual_machine.detach_volume(self.apiClient, self.volume)
|
||||
self.attached = False
|
||||
|
||||
cmd = extractVolume.extractVolumeCmd()
|
||||
cmd.id = self.volume.id
|
||||
cmd.mode = "HTTP_DOWNLOAD"
|
||||
|
|
@ -528,7 +534,7 @@ class TestVolumes(cloudstackTestCase):
|
|||
% (extract_vol.url, self.volume.id)
|
||||
)
|
||||
|
||||
@attr(tags = ["advanced", "advancedns", "smoke"])
|
||||
@attr(tags = ["advanced", "advancedns", "smoke", "basic"])
|
||||
def test_07_resize_fail(self):
|
||||
"""Verify invalid options fail to Resize a volume"""
|
||||
# Verify the size is the new size is what we wanted it to be.
|
||||
|
|
@ -543,7 +549,7 @@ class TestVolumes(cloudstackTestCase):
|
|||
response = self.apiClient.resizeVolume(cmd)
|
||||
except Exception as ex:
|
||||
#print str(ex)
|
||||
if "HTTP Error 431:" in str(ex):
|
||||
if "invalid" in str(ex):
|
||||
success = True
|
||||
self.assertEqual(
|
||||
success,
|
||||
|
|
@ -557,7 +563,7 @@ class TestVolumes(cloudstackTestCase):
|
|||
try:
|
||||
response = self.apiClient.resizeVolume(cmd)
|
||||
except Exception as ex:
|
||||
if "HTTP Error 431:" in str(ex):
|
||||
if "invalid" in str(ex):
|
||||
success = True
|
||||
self.assertEqual(
|
||||
success,
|
||||
|
|
@ -576,6 +582,7 @@ class TestVolumes(cloudstackTestCase):
|
|||
)
|
||||
#attach the volume
|
||||
self.virtual_machine.attach_volume(self.apiClient, self.volume)
|
||||
self.attached = True
|
||||
#stop the vm if it is on xenserver
|
||||
if self.services['hypervisor'].lower() == "xenserver":
|
||||
self.virtual_machine.stop(self.apiClient)
|
||||
|
|
@ -603,10 +610,11 @@ class TestVolumes(cloudstackTestCase):
|
|||
True,
|
||||
"Verify the volume did not resize"
|
||||
)
|
||||
self.virtual_machine.detach_volume(self.apiClient, self.volume)
|
||||
self.cleanup.append(self.volume)
|
||||
if self.services['hypervisor'].lower() == "xenserver":
|
||||
self.virtual_machine.start(self.apiClient)
|
||||
|
||||
@attr(tags = ["advanced", "advancedns", "smoke"])
|
||||
|
||||
@attr(tags = ["advanced", "advancedns", "smoke", "basic"])
|
||||
def test_08_resize_volume(self):
|
||||
"""Resize a volume"""
|
||||
# Verify the size is the new size is what we wanted it to be.
|
||||
|
|
@ -616,6 +624,8 @@ class TestVolumes(cloudstackTestCase):
|
|||
self.virtual_machine.id
|
||||
))
|
||||
self.virtual_machine.attach_volume(self.apiClient, self.volume)
|
||||
self.attached = True
|
||||
|
||||
if self.services['hypervisor'].lower() == "xenserver":
|
||||
self.virtual_machine.stop(self.apiClient)
|
||||
self.debug("Resize Volume ID: %s" % self.volume.id)
|
||||
|
|
@ -635,7 +645,7 @@ class TestVolumes(cloudstackTestCase):
|
|||
type='DATADISK'
|
||||
)
|
||||
for vol in list_volume_response:
|
||||
if vol.id == self.volume.id and vol.size == 3221225472L:
|
||||
if vol.id == self.volume.id and vol.size == 3221225472L and vol.state == 'Ready':
|
||||
success = True
|
||||
if success:
|
||||
break
|
||||
|
|
@ -649,10 +659,10 @@ class TestVolumes(cloudstackTestCase):
|
|||
"Check if the volume resized appropriately"
|
||||
)
|
||||
|
||||
self.virtual_machine.detach_volume(self.apiClient, self.volume)
|
||||
self.cleanup.append(self.volume)
|
||||
if self.services['hypervisor'].lower() == "xenserver":
|
||||
self.virtual_machine.start(self.apiClient)
|
||||
|
||||
@attr(tags = ["advanced", "advancedns", "smoke"])
|
||||
@attr(tags = ["advanced", "advancedns", "smoke","basic"])
|
||||
def test_09_delete_detached_volume(self):
|
||||
"""Delete a Volume unattached to an VM
|
||||
"""
|
||||
|
|
@ -665,13 +675,23 @@ class TestVolumes(cloudstackTestCase):
|
|||
|
||||
self.debug("Delete Volume ID: %s" % self.volume.id)
|
||||
|
||||
self.volume_1 = Volume.create(
|
||||
self.api_client,
|
||||
self.services,
|
||||
account=self.account.name,
|
||||
domainid=self.account.domainid
|
||||
)
|
||||
|
||||
self.virtual_machine.attach_volume(self.apiClient, self.volume_1)
|
||||
self.virtual_machine.detach_volume(self.apiClient, self.volume_1)
|
||||
|
||||
cmd = deleteVolume.deleteVolumeCmd()
|
||||
cmd.id = self.volume.id
|
||||
cmd.id = self.volume_1.id
|
||||
self.apiClient.deleteVolume(cmd)
|
||||
|
||||
list_volume_response = list_volumes(
|
||||
self.apiClient,
|
||||
id=self.volume.id,
|
||||
id=self.volume_1.id,
|
||||
type='DATADISK'
|
||||
)
|
||||
self.assertEqual(
|
||||
|
|
|
|||
|
|
@ -142,6 +142,7 @@ known_categories = {
|
|||
'listNics':'Nic',
|
||||
'AffinityGroup': 'Affinity Group',
|
||||
'InternalLoadBalancer': 'Internal LB',
|
||||
'DeploymentPlanners': 'Configuration',
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -43,10 +43,8 @@ class cloudConnection(object):
|
|||
self.securityKey = securityKey
|
||||
self.mgtSvr = mgtSvr
|
||||
self.port = port
|
||||
if user:
|
||||
self.user = user
|
||||
if passwd:
|
||||
self.passwd = passwd
|
||||
self.user = user
|
||||
self.passwd = passwd
|
||||
self.logging = logging
|
||||
self.path = path
|
||||
self.retries = 5
|
||||
|
|
|
|||
|
|
@ -685,6 +685,7 @@ class Volume:
|
|||
timeout = timeout - 1
|
||||
return
|
||||
|
||||
@classmethod
|
||||
def migrate(cls, apiclient, **kwargs):
|
||||
"""Migrate a volume"""
|
||||
cmd = migrateVolume.migrateVolumeCmd()
|
||||
|
|
@ -1268,6 +1269,10 @@ class ServiceOffering:
|
|||
|
||||
if "tags" in services:
|
||||
cmd.tags = services["tags"]
|
||||
|
||||
if "deploymentplanner" in services:
|
||||
cmd.deploymentplanner = services["deploymentplanner"]
|
||||
|
||||
# Service Offering private to that domain
|
||||
if domainid:
|
||||
cmd.domainid = domainid
|
||||
|
|
|
|||
|
|
@ -17,13 +17,20 @@
|
|||
package com.cloud.hypervisor.vmware.mo;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.hypervisor.vmware.util.VmwareContext;
|
||||
import com.vmware.vim25.DVPortgroupConfigSpec;
|
||||
import com.vmware.vim25.DVSConfigInfo;
|
||||
import com.vmware.vim25.ManagedObjectReference;
|
||||
import com.vmware.vim25.TaskInfo;
|
||||
import com.vmware.vim25.VMwareDVSConfigInfo;
|
||||
import com.vmware.vim25.VMwareDVSConfigSpec;
|
||||
import com.vmware.vim25.VMwareDVSPvlanMapEntry;
|
||||
|
||||
public class DistributedVirtualSwitchMO extends BaseMO {
|
||||
private static final Logger s_logger = Logger.getLogger(DistributedVirtualSwitchMO.class);
|
||||
|
|
@ -46,4 +53,74 @@ public class DistributedVirtualSwitchMO extends BaseMO {
|
|||
// TODO(sateesh): Update numPorts
|
||||
_context.getService().reconfigureDVPortgroupTask(dvPortGroupMor, dvPortGroupSpec);
|
||||
}
|
||||
|
||||
public void updateVMWareDVSwitch(ManagedObjectReference dvSwitchMor, VMwareDVSConfigSpec dvsSpec) throws Exception {
|
||||
_context.getService().reconfigureDvsTask(dvSwitchMor, dvsSpec);
|
||||
}
|
||||
|
||||
public TaskInfo updateVMWareDVSwitchGetTask(ManagedObjectReference dvSwitchMor, VMwareDVSConfigSpec dvsSpec) throws Exception {
|
||||
ManagedObjectReference task = _context.getService().reconfigureDvsTask(dvSwitchMor, dvsSpec);
|
||||
TaskInfo info = (TaskInfo) (_context.getVimClient().getDynamicProperty(task, "info"));
|
||||
boolean waitvalue = _context.getVimClient().waitForTask(task);
|
||||
return info;
|
||||
}
|
||||
|
||||
public String getDVSConfigVersion(ManagedObjectReference dvSwitchMor) throws Exception {
|
||||
assert (dvSwitchMor != null);
|
||||
DVSConfigInfo dvsConfigInfo = (DVSConfigInfo)_context.getVimClient().getDynamicProperty(dvSwitchMor, "config");
|
||||
return dvsConfigInfo.getConfigVersion();
|
||||
}
|
||||
|
||||
public Map<Integer, HypervisorHostHelper.PvlanType> retrieveVlanPvlan(int vlanid, int secondaryvlanid, ManagedObjectReference dvSwitchMor) throws Exception {
|
||||
assert (dvSwitchMor != null);
|
||||
|
||||
Map<Integer, HypervisorHostHelper.PvlanType> result = new HashMap<Integer, HypervisorHostHelper.PvlanType>();
|
||||
|
||||
VMwareDVSConfigInfo configinfo = (VMwareDVSConfigInfo)_context.getVimClient().getDynamicProperty(dvSwitchMor, "config");
|
||||
List<VMwareDVSPvlanMapEntry> pvlanconfig = null;
|
||||
pvlanconfig = configinfo.getPvlanConfig();
|
||||
|
||||
if (null == pvlanconfig || 0 == pvlanconfig.size()) {
|
||||
return result;
|
||||
}
|
||||
// Iterate through the pvlanMapList and check if the specified vlan id and pvlan id exist. If they do, set the fields in result accordingly.
|
||||
|
||||
for (VMwareDVSPvlanMapEntry mapEntry : pvlanconfig) {
|
||||
int entryVlanid = mapEntry.getPrimaryVlanId();
|
||||
int entryPvlanid = mapEntry.getSecondaryVlanId();
|
||||
if (entryVlanid == entryPvlanid) {
|
||||
// promiscuous
|
||||
if (vlanid == entryVlanid) {
|
||||
// pvlan type will always be promiscuous in this case.
|
||||
result.put(vlanid, HypervisorHostHelper.PvlanType.valueOf(mapEntry.getPvlanType()));
|
||||
} else if ((vlanid != secondaryvlanid) && secondaryvlanid == entryVlanid) {
|
||||
result.put(secondaryvlanid, HypervisorHostHelper.PvlanType.valueOf(mapEntry.getPvlanType()));
|
||||
}
|
||||
} else {
|
||||
if (vlanid == entryVlanid) {
|
||||
// vlan id in entry is promiscuous
|
||||
result.put(vlanid, HypervisorHostHelper.PvlanType.promiscuous);
|
||||
} else if (vlanid == entryPvlanid) {
|
||||
result.put(vlanid, HypervisorHostHelper.PvlanType.valueOf(mapEntry.getPvlanType()));
|
||||
}
|
||||
if ((vlanid != secondaryvlanid) && secondaryvlanid == entryVlanid) {
|
||||
//promiscuous
|
||||
result.put(secondaryvlanid, HypervisorHostHelper.PvlanType.promiscuous);
|
||||
} else if (secondaryvlanid == entryPvlanid) {
|
||||
result.put(secondaryvlanid, HypervisorHostHelper.PvlanType.valueOf(mapEntry.getPvlanType()));
|
||||
}
|
||||
|
||||
}
|
||||
// If we already know that the vlanid is being used as a non primary vlan, it's futile to
|
||||
// go over the entire list. Return.
|
||||
if (result.containsKey(vlanid) && result.get(vlanid) != HypervisorHostHelper.PvlanType.promiscuous)
|
||||
return result;
|
||||
|
||||
// If we've already found both vlanid and pvlanid, we have enough info to make a decision. Return.
|
||||
if (result.containsKey(vlanid) && result.containsKey(secondaryvlanid))
|
||||
return result;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -39,6 +39,7 @@ import com.cloud.utils.cisco.n1kv.vsm.VsmCommand.SwitchPortMode;
|
|||
import com.cloud.utils.db.GlobalLock;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.utils.net.NetUtils;
|
||||
import com.vmware.vim25.AlreadyExistsFaultMsg;
|
||||
import com.vmware.vim25.BoolPolicy;
|
||||
import com.vmware.vim25.DVPortSetting;
|
||||
import com.vmware.vim25.DVPortgroupConfigInfo;
|
||||
|
|
@ -59,7 +60,11 @@ import com.vmware.vim25.ObjectContent;
|
|||
import com.vmware.vim25.OvfCreateImportSpecParams;
|
||||
import com.vmware.vim25.OvfCreateImportSpecResult;
|
||||
import com.vmware.vim25.OvfFileItem;
|
||||
import com.vmware.vim25.TaskInfo;
|
||||
import com.vmware.vim25.VMwareDVSConfigSpec;
|
||||
import com.vmware.vim25.VMwareDVSPortSetting;
|
||||
import com.vmware.vim25.VMwareDVSPvlanConfigSpec;
|
||||
import com.vmware.vim25.VMwareDVSPvlanMapEntry;
|
||||
import com.vmware.vim25.VirtualDeviceConfigSpec;
|
||||
import com.vmware.vim25.VirtualDeviceConfigSpecOperation;
|
||||
import com.vmware.vim25.VirtualLsiLogicController;
|
||||
|
|
@ -67,6 +72,7 @@ import com.vmware.vim25.VirtualMachineConfigSpec;
|
|||
import com.vmware.vim25.VirtualMachineFileInfo;
|
||||
import com.vmware.vim25.VirtualMachineVideoCard;
|
||||
import com.vmware.vim25.VirtualSCSISharing;
|
||||
import com.vmware.vim25.VmwareDistributedVirtualSwitchPvlanSpec;
|
||||
import com.vmware.vim25.VmwareDistributedVirtualSwitchVlanIdSpec;
|
||||
import com.vmware.vim25.VmwareDistributedVirtualSwitchVlanSpec;
|
||||
|
||||
|
|
@ -124,12 +130,17 @@ public class HypervisorHostHelper {
|
|||
}
|
||||
}
|
||||
|
||||
public static String composeCloudNetworkName(String prefix, String vlanId, Integer networkRateMbps, String vSwitchName) {
|
||||
public static String composeCloudNetworkName(String prefix, String vlanId, String svlanId, Integer networkRateMbps, String vSwitchName) {
|
||||
StringBuffer sb = new StringBuffer(prefix);
|
||||
if(vlanId == null || UNTAGGED_VLAN_NAME.equalsIgnoreCase(vlanId))
|
||||
if(vlanId == null || UNTAGGED_VLAN_NAME.equalsIgnoreCase(vlanId)) {
|
||||
sb.append(".untagged");
|
||||
else
|
||||
} else {
|
||||
sb.append(".").append(vlanId);
|
||||
if (svlanId != null) {
|
||||
sb.append(".").append("s" + svlanId);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if(networkRateMbps != null && networkRateMbps.intValue() > 0)
|
||||
sb.append(".").append(String.valueOf(networkRateMbps));
|
||||
|
|
@ -412,7 +423,7 @@ public class HypervisorHostHelper {
|
|||
*/
|
||||
|
||||
public static Pair<ManagedObjectReference, String> prepareNetwork(String physicalNetwork, String namePrefix,
|
||||
HostMO hostMo, String vlanId, Integer networkRateMbps, Integer networkRateMulticastMbps, long timeOutMs,
|
||||
HostMO hostMo, String vlanId, String secondaryvlanId, Integer networkRateMbps, Integer networkRateMulticastMbps, long timeOutMs,
|
||||
VirtualSwitchType vSwitchType, int numPorts, String gateway, boolean configureVServiceInNexus) throws Exception {
|
||||
ManagedObjectReference morNetwork = null;
|
||||
VmwareContext context = hostMo.getContext();
|
||||
|
|
@ -428,20 +439,28 @@ public class HypervisorHostHelper {
|
|||
boolean createGCTag = false;
|
||||
String networkName;
|
||||
Integer vid = null;
|
||||
Integer spvlanid = null; // secondary pvlan id
|
||||
|
||||
if(vlanId != null && !UNTAGGED_VLAN_NAME.equalsIgnoreCase(vlanId)) {
|
||||
createGCTag = true;
|
||||
vid = Integer.parseInt(vlanId);
|
||||
}
|
||||
networkName = composeCloudNetworkName(namePrefix, vlanId, networkRateMbps, physicalNetwork);
|
||||
if (secondaryvlanId != null) {
|
||||
spvlanid = Integer.parseInt(secondaryvlanId);
|
||||
}
|
||||
networkName = composeCloudNetworkName(namePrefix, vlanId, secondaryvlanId, networkRateMbps, physicalNetwork);
|
||||
|
||||
if (vSwitchType == VirtualSwitchType.VMwareDistributedVirtualSwitch) {
|
||||
VMwareDVSConfigSpec dvsSpec = null;
|
||||
DVSTrafficShapingPolicy shapingPolicy;
|
||||
VmwareDistributedVirtualSwitchVlanSpec vlanSpec;
|
||||
VmwareDistributedVirtualSwitchVlanSpec vlanSpec = null;
|
||||
VmwareDistributedVirtualSwitchPvlanSpec pvlanSpec = null;
|
||||
//VMwareDVSPvlanConfigSpec pvlanSpec = null;
|
||||
DVSSecurityPolicy secPolicy;
|
||||
VMwareDVSPortSetting dvsPortSetting;
|
||||
DVPortgroupConfigSpec dvPortGroupSpec;
|
||||
DVPortgroupConfigInfo dvPortgroupInfo;
|
||||
//DVSConfigInfo dvsInfo;
|
||||
|
||||
dvSwitchName = physicalNetwork;
|
||||
// TODO(sateesh): Remove this after ensuring proper default value for vSwitchName throughout traffic types
|
||||
|
|
@ -462,13 +481,95 @@ public class HypervisorHostHelper {
|
|||
dvSwitchMo = new DistributedVirtualSwitchMO(context, morDvSwitch);
|
||||
|
||||
shapingPolicy = getDVSShapingPolicy(networkRateMbps);
|
||||
if (vid != null) {
|
||||
vlanSpec = createDVPortVlanIdSpec(vid);
|
||||
} else {
|
||||
vlanSpec = createDVPortVlanSpec();
|
||||
}
|
||||
secPolicy = createDVSSecurityPolicy();
|
||||
|
||||
// First, if both vlan id and pvlan id are provided, we need to
|
||||
// reconfigure the DVSwitch to have a tuple <vlan id, pvlan id> of
|
||||
// type isolated.
|
||||
if (vid != null && spvlanid != null) {
|
||||
// First check if the vlan/pvlan pair already exists on this dvswitch.
|
||||
|
||||
Map<Integer, HypervisorHostHelper.PvlanType> vlanmap = dvSwitchMo.retrieveVlanPvlan(vid, spvlanid, morDvSwitch);
|
||||
if (vlanmap.size() != 0) {
|
||||
// Then either vid or pvlanid or both are already being used.
|
||||
if (vlanmap.containsKey(vid) && vlanmap.get(vid) != HypervisorHostHelper.PvlanType.promiscuous) {
|
||||
// This VLAN ID is already setup as a non-promiscuous vlan id on the DVS. Throw an exception.
|
||||
String msg = "VLAN ID " + vid + " is already in use as a " + vlanmap.get(vid).toString() + " VLAN on the DVSwitch";
|
||||
s_logger.error(msg);
|
||||
throw new Exception(msg);
|
||||
}
|
||||
if ((vid != spvlanid) && vlanmap.containsKey(spvlanid) && vlanmap.get(spvlanid) != HypervisorHostHelper.PvlanType.isolated) {
|
||||
// This PVLAN ID is already setup as a non-isolated vlan id on the DVS. Throw an exception.
|
||||
String msg = "PVLAN ID " + spvlanid + " is already in use as a " + vlanmap.get(spvlanid).toString() + " VLAN in the DVSwitch";
|
||||
s_logger.error(msg);
|
||||
throw new Exception(msg);
|
||||
}
|
||||
}
|
||||
|
||||
// First create a DVSconfig spec.
|
||||
dvsSpec = new VMwareDVSConfigSpec();
|
||||
// Next, add the required primary and secondary vlan config specs to the dvs config spec.
|
||||
if (!vlanmap.containsKey(vid)) {
|
||||
VMwareDVSPvlanConfigSpec ppvlanConfigSpec = createDVPortPvlanConfigSpec(vid, vid, PvlanType.promiscuous, PvlanOperation.add);
|
||||
dvsSpec.getPvlanConfigSpec().add(ppvlanConfigSpec);
|
||||
}
|
||||
if ( !vid.equals(spvlanid) && !vlanmap.containsKey(spvlanid)) {
|
||||
VMwareDVSPvlanConfigSpec spvlanConfigSpec = createDVPortPvlanConfigSpec(vid, spvlanid, PvlanType.isolated, PvlanOperation.add);
|
||||
dvsSpec.getPvlanConfigSpec().add(spvlanConfigSpec);
|
||||
}
|
||||
|
||||
if (dvsSpec.getPvlanConfigSpec().size() > 0) {
|
||||
// We have something to configure on the DVS... so send it the command.
|
||||
// When reconfiguring a vmware DVSwitch, we need to send in the configVersion in the spec.
|
||||
// Let's retrieve this switch's configVersion first.
|
||||
String dvsConfigVersion = dvSwitchMo.getDVSConfigVersion(morDvSwitch);
|
||||
dvsSpec.setConfigVersion(dvsConfigVersion);
|
||||
// Reconfigure the dvs using this spec.
|
||||
|
||||
try {
|
||||
TaskInfo reconfigTask = dvSwitchMo.updateVMWareDVSwitchGetTask(morDvSwitch, dvsSpec);
|
||||
} catch (Exception e) {
|
||||
if(e instanceof AlreadyExistsFaultMsg) {
|
||||
s_logger.info("Specified vlan id (" + vid + ") private vlan id (" + spvlanid + ") tuple already configured on VMWare DVSwitch");
|
||||
// Do nothing, good if the tuple's already configured on the dvswitch.
|
||||
} else {
|
||||
// Rethrow the exception
|
||||
s_logger.error("Failed to configure vlan/pvlan tuple on VMware DVSwitch: " + vid + "/" + spvlanid + ", failure message: " + e.getMessage());
|
||||
e.printStackTrace();
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Else the vlan/pvlan pair already exists on the DVSwitch, and we needn't configure it again.
|
||||
}
|
||||
|
||||
// Next, create the port group. For this, we need to create a VLAN spec.
|
||||
if (vid == null) {
|
||||
vlanSpec = createDVPortVlanSpec();
|
||||
} else {
|
||||
if (spvlanid == null) {
|
||||
// Create vlan spec.
|
||||
vlanSpec = createDVPortVlanIdSpec(vid);
|
||||
} else {
|
||||
// Create a pvlan spec. The pvlan spec is different from the pvlan config spec
|
||||
// that we created earlier. The pvlan config spec is used to configure the switch
|
||||
// with a <primary vlanId, secondary vlanId> tuple. The pvlan spec is used
|
||||
// to configure a port group (i.e., a network) with a secondary vlan id. We don't
|
||||
// need to mention more than the secondary vlan id because one secondary vlan id
|
||||
// can be associated with only one primary vlan id. Give vCenter the secondary vlan id,
|
||||
// and it will find out the associated primary vlan id and do the rest of the
|
||||
// port group configuration.
|
||||
pvlanSpec = createDVPortPvlanIdSpec(spvlanid);
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE - VmwareDistributedVirtualSwitchPvlanSpec extends VmwareDistributedVirtualSwitchVlanSpec.
|
||||
if (pvlanSpec != null) {
|
||||
dvsPortSetting = createVmwareDVPortSettingSpec(shapingPolicy, secPolicy, pvlanSpec);
|
||||
} else {
|
||||
dvsPortSetting = createVmwareDVPortSettingSpec(shapingPolicy, secPolicy, vlanSpec);
|
||||
}
|
||||
|
||||
dvPortGroupSpec = createDvPortGroupSpec(networkName, dvsPortSetting, numPorts);
|
||||
|
||||
if (!dataCenterMo.hasDvPortGroup(networkName)) {
|
||||
|
|
@ -627,7 +728,6 @@ public class HypervisorHostHelper {
|
|||
dvsPortSetting.setSecurityPolicy(secPolicy);
|
||||
dvsPortSetting.setInShapingPolicy(shapingPolicy);
|
||||
dvsPortSetting.setOutShapingPolicy(shapingPolicy);
|
||||
|
||||
return dvsPortSetting;
|
||||
}
|
||||
|
||||
|
|
@ -658,6 +758,35 @@ public class HypervisorHostHelper {
|
|||
return shapingPolicy;
|
||||
}
|
||||
|
||||
public static VmwareDistributedVirtualSwitchPvlanSpec createDVPortPvlanIdSpec(int pvlanId) {
|
||||
VmwareDistributedVirtualSwitchPvlanSpec pvlanIdSpec = new VmwareDistributedVirtualSwitchPvlanSpec();
|
||||
pvlanIdSpec.setPvlanId(pvlanId);
|
||||
return pvlanIdSpec;
|
||||
}
|
||||
|
||||
/**
 * Operation to perform on a private-vlan map entry when reconfiguring a
 * VMware distributed virtual switch.
 *
 * NOTE: the lowercase constant names are sent to vCenter verbatim via
 * {@code toString()}, so they must not be renamed or re-cased.
 */
public enum PvlanOperation {
    /** Create a new pvlan map entry on the switch. */
    add,
    /** Modify an existing pvlan map entry. */
    edit,
    /** Delete a pvlan map entry. */
    remove
}
|
||||
|
||||
/**
 * Private-vlan classification of a vlan id on a VMware distributed virtual
 * switch. A primary vlan id is configured as {@code promiscuous} (mapped to
 * itself), and a secondary vlan id as {@code isolated}.
 *
 * NOTE: the lowercase constant names are sent to vCenter verbatim via
 * {@code toString()}, so they must not be renamed or re-cased.
 */
public enum PvlanType {
    /** Primary vlan id of a pvlan pair. */
    promiscuous,
    /** Secondary vlan id of a pvlan pair. */
    isolated,
    /** Not used by CloudStack; present for completeness of the vSphere model. */
    community,
}
|
||||
|
||||
public static VMwareDVSPvlanConfigSpec createDVPortPvlanConfigSpec(int vlanId, int secondaryVlanId, PvlanType pvlantype, PvlanOperation operation) {
|
||||
VMwareDVSPvlanConfigSpec pvlanConfigSpec = new VMwareDVSPvlanConfigSpec();
|
||||
VMwareDVSPvlanMapEntry map = new VMwareDVSPvlanMapEntry();
|
||||
map.setPvlanType(pvlantype.toString());
|
||||
map.setPrimaryVlanId(vlanId);
|
||||
map.setSecondaryVlanId(secondaryVlanId);
|
||||
pvlanConfigSpec.setPvlanEntry(map);
|
||||
|
||||
pvlanConfigSpec.setOperation(operation.toString());
|
||||
return pvlanConfigSpec;
|
||||
}
|
||||
public static VmwareDistributedVirtualSwitchVlanIdSpec createDVPortVlanIdSpec(int vlanId) {
|
||||
VmwareDistributedVirtualSwitchVlanIdSpec vlanIdSpec = new VmwareDistributedVirtualSwitchVlanIdSpec();
|
||||
vlanIdSpec.setVlanId(vlanId);
|
||||
|
|
@ -706,7 +835,7 @@ public class HypervisorHostHelper {
|
|||
vid = Integer.parseInt(vlanId);
|
||||
}
|
||||
|
||||
networkName = composeCloudNetworkName(namePrefix, vlanId, networkRateMbps, vSwitchName);
|
||||
networkName = composeCloudNetworkName(namePrefix, vlanId, null, networkRateMbps, vSwitchName);
|
||||
HostNetworkSecurityPolicy secPolicy = null;
|
||||
if (namePrefix.equalsIgnoreCase("cloud.private")) {
|
||||
secPolicy = new HostNetworkSecurityPolicy();
|
||||
|
|
@ -1036,6 +1165,7 @@ public class HypervisorHostHelper {
|
|||
|
||||
context.uploadVmdkFile(ovfFileItem.isCreate() ? "PUT" : "POST", urlToPost, absoluteFile,
|
||||
bytesAlreadyWritten, new ActionDelegate<Long> () {
|
||||
@Override
|
||||
public void action(Long param) {
|
||||
progressReporter.reportProgress((int)(param * 100 / totalBytes));
|
||||
}
|
||||
|
|
|
|||
Loading…
Reference in New Issue