From 62e9273581db9d65b87b6224a701efca1ff472db Mon Sep 17 00:00:00 2001 From: Alex Huang Date: Thu, 21 Nov 2013 03:12:05 -0800 Subject: [PATCH] Removed trailing spaces --- .../api/storage/CreateVolumeOVAAnswer.java | 52 +- .../api/storage/CreateVolumeOVACommand.java | 120 +- .../api/storage/PrepareOVAPackingAnswer.java | 52 +- .../api/storage/PrepareOVAPackingCommand.java | 92 +- .../exception/AffinityConflictException.java | 68 +- .../cloudstack/affinity/AffinityGroup.java | 66 +- .../affinity/AffinityGroupProcessor.java | 174 +- .../affinity/AffinityGroupService.java | 164 +- .../affinity/AffinityProcessorBase.java | 138 +- .../config/ListDeploymentPlannersCmd.java | 138 +- .../core/ec2/EC2ModifyInstanceAttribute.java | 126 +- .../cloud/agent/api/UnregisterVMCommand.java | 68 +- .../deploy/DeploymentPlanningManager.java | 98 +- .../framework/config/ConfigDepot.java | 60 +- .../framework/config/ConfigKey.java | 374 +-- .../affinity/HostAntiAffinityProcessor.java | 298 +- pom.xml | 2 - .../deploy/DeploymentPlanningManagerImpl.java | 2654 ++++++++--------- .../deploy/dao/PlannerHostReservationDao.java | 64 +- .../dao/PlannerHostReservationDaoImpl.java | 148 +- .../vm/DeploymentPlanningManagerImplTest.java | 766 ++--- .../affinity/AffinityApiUnitTest.java | 586 ++-- 22 files changed, 3153 insertions(+), 3155 deletions(-) diff --git a/api/src/com/cloud/agent/api/storage/CreateVolumeOVAAnswer.java b/api/src/com/cloud/agent/api/storage/CreateVolumeOVAAnswer.java index a703ab52637..60d74f6c86e 100755 --- a/api/src/com/cloud/agent/api/storage/CreateVolumeOVAAnswer.java +++ b/api/src/com/cloud/agent/api/storage/CreateVolumeOVAAnswer.java @@ -1,26 +1,26 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.agent.api.storage; - -import com.cloud.agent.api.Answer; - -public class CreateVolumeOVAAnswer extends Answer { - public CreateVolumeOVAAnswer(CreateVolumeOVACommand cmd, boolean result, String details) { - super(cmd, result, details); - } - -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.agent.api.storage; + +import com.cloud.agent.api.Answer; + +public class CreateVolumeOVAAnswer extends Answer { + public CreateVolumeOVAAnswer(CreateVolumeOVACommand cmd, boolean result, String details) { + super(cmd, result, details); + } + +} diff --git a/api/src/com/cloud/agent/api/storage/CreateVolumeOVACommand.java b/api/src/com/cloud/agent/api/storage/CreateVolumeOVACommand.java index cec48ee1f14..b13293f522c 100755 --- a/api/src/com/cloud/agent/api/storage/CreateVolumeOVACommand.java +++ b/api/src/com/cloud/agent/api/storage/CreateVolumeOVACommand.java @@ -1,60 +1,60 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.agent.api.storage; - -import com.cloud.agent.api.Command; -import com.cloud.agent.api.to.StorageFilerTO; -import com.cloud.storage.StoragePool; - -public class CreateVolumeOVACommand extends Command { - String secUrl; - String volPath; - String volName; - StorageFilerTO pool; - - public CreateVolumeOVACommand() { - } - - public CreateVolumeOVACommand(String secUrl, String volPath, String volName, StoragePool pool, int wait) { - this.secUrl = secUrl; - this.volPath = volPath; - this.volName = volName; - this.pool = new StorageFilerTO(pool); - setWait(wait); - } - - @Override - public boolean executeInSequence() { - return true; - } - - public String getVolPath() { - return this.volPath; - } - - public String getVolName() { - return this.volName; - } - - public String getSecondaryStorageUrl() { - return this.secUrl; - } - - public StorageFilerTO getPool() { - return pool; - } -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.agent.api.storage; + +import com.cloud.agent.api.Command; +import com.cloud.agent.api.to.StorageFilerTO; +import com.cloud.storage.StoragePool; + +public class CreateVolumeOVACommand extends Command { + String secUrl; + String volPath; + String volName; + StorageFilerTO pool; + + public CreateVolumeOVACommand() { + } + + public CreateVolumeOVACommand(String secUrl, String volPath, String volName, StoragePool pool, int wait) { + this.secUrl = secUrl; + this.volPath = volPath; + this.volName = volName; + this.pool = new StorageFilerTO(pool); + setWait(wait); + } + + @Override + public boolean executeInSequence() { + return true; + } + + public String getVolPath() { + return this.volPath; + } + + public String getVolName() { + return this.volName; + } + + public String getSecondaryStorageUrl() { + return this.secUrl; + } + + public StorageFilerTO getPool() { + return pool; + } +} diff --git a/api/src/com/cloud/agent/api/storage/PrepareOVAPackingAnswer.java b/api/src/com/cloud/agent/api/storage/PrepareOVAPackingAnswer.java index 923d952a137..dad660b90f9 100755 --- a/api/src/com/cloud/agent/api/storage/PrepareOVAPackingAnswer.java +++ b/api/src/com/cloud/agent/api/storage/PrepareOVAPackingAnswer.java @@ -1,26 +1,26 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.agent.api.storage; - -import com.cloud.agent.api.Answer; - -public class PrepareOVAPackingAnswer extends Answer { - public PrepareOVAPackingAnswer(PrepareOVAPackingCommand cmd, boolean result, String details) { - super(cmd, result, details); - } - -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.agent.api.storage; + +import com.cloud.agent.api.Answer; + +public class PrepareOVAPackingAnswer extends Answer { + public PrepareOVAPackingAnswer(PrepareOVAPackingCommand cmd, boolean result, String details) { + super(cmd, result, details); + } + +} diff --git a/api/src/com/cloud/agent/api/storage/PrepareOVAPackingCommand.java b/api/src/com/cloud/agent/api/storage/PrepareOVAPackingCommand.java index 7bd22e46f54..bd3eb80b8ad 100755 --- a/api/src/com/cloud/agent/api/storage/PrepareOVAPackingCommand.java +++ b/api/src/com/cloud/agent/api/storage/PrepareOVAPackingCommand.java @@ -1,46 +1,46 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.agent.api.storage; - -import com.cloud.agent.api.Command; - -public class PrepareOVAPackingCommand extends Command { - private String templatePath; - private String secUrl; - - public PrepareOVAPackingCommand() { - } - - public PrepareOVAPackingCommand(String secUrl, String templatePath) { - this.secUrl = secUrl; - this.templatePath = templatePath; - } - - @Override - public boolean executeInSequence() { - return true; - } - - public String getTemplatePath() { - return this.templatePath; - } - - public String getSecondaryStorageUrl() { - return this.secUrl; - } - -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.agent.api.storage; + +import com.cloud.agent.api.Command; + +public class PrepareOVAPackingCommand extends Command { + private String templatePath; + private String secUrl; + + public PrepareOVAPackingCommand() { + } + + public PrepareOVAPackingCommand(String secUrl, String templatePath) { + this.secUrl = secUrl; + this.templatePath = templatePath; + } + + @Override + public boolean executeInSequence() { + return true; + } + + public String getTemplatePath() { + return this.templatePath; + } + + public String getSecondaryStorageUrl() { + return this.secUrl; + } + +} diff --git a/api/src/com/cloud/exception/AffinityConflictException.java b/api/src/com/cloud/exception/AffinityConflictException.java index 8b187783f24..3faa09f5b3c 100644 --- a/api/src/com/cloud/exception/AffinityConflictException.java +++ b/api/src/com/cloud/exception/AffinityConflictException.java @@ -1,34 +1,34 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.exception; - -import com.cloud.utils.SerialVersionUID; -import com.cloud.utils.exception.CloudRuntimeException; - -public class AffinityConflictException extends CloudRuntimeException { - - private static final long serialVersionUID = SerialVersionUID.AffinityConflictException; - - public AffinityConflictException(String message) { - super(message); - } - - public AffinityConflictException(String message, Throwable th) { - super(message, th); - } - -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.exception; + +import com.cloud.utils.SerialVersionUID; +import com.cloud.utils.exception.CloudRuntimeException; + +public class AffinityConflictException extends CloudRuntimeException { + + private static final long serialVersionUID = SerialVersionUID.AffinityConflictException; + + public AffinityConflictException(String message) { + super(message); + } + + public AffinityConflictException(String message, Throwable th) { + super(message, th); + } + +} diff --git a/api/src/org/apache/cloudstack/affinity/AffinityGroup.java b/api/src/org/apache/cloudstack/affinity/AffinityGroup.java index c1ad11dbdd0..cb4e762677a 100644 --- a/api/src/org/apache/cloudstack/affinity/AffinityGroup.java +++ b/api/src/org/apache/cloudstack/affinity/AffinityGroup.java @@ -1,33 +1,33 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package org.apache.cloudstack.affinity; - -import org.apache.cloudstack.acl.ControlledEntity; -import org.apache.cloudstack.api.Identity; -import org.apache.cloudstack.api.InternalIdentity; - -public interface AffinityGroup extends ControlledEntity, InternalIdentity, Identity { - - String getName(); - - String getDescription(); - - String getType(); - - ACLType getAclType(); - -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.affinity; + +import org.apache.cloudstack.acl.ControlledEntity; +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; + +public interface AffinityGroup extends ControlledEntity, InternalIdentity, Identity { + + String getName(); + + String getDescription(); + + String getType(); + + ACLType getAclType(); + +} diff --git a/api/src/org/apache/cloudstack/affinity/AffinityGroupProcessor.java b/api/src/org/apache/cloudstack/affinity/AffinityGroupProcessor.java index 91b07b29af6..b312b8bc47b 100644 --- a/api/src/org/apache/cloudstack/affinity/AffinityGroupProcessor.java +++ b/api/src/org/apache/cloudstack/affinity/AffinityGroupProcessor.java @@ -1,88 +1,88 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package org.apache.cloudstack.affinity; - -import com.cloud.deploy.DeployDestination; -import com.cloud.deploy.DeploymentPlan; -import com.cloud.deploy.DeploymentPlanner.ExcludeList; -import com.cloud.exception.AffinityConflictException; -import com.cloud.utils.component.Adapter; -import com.cloud.vm.VirtualMachineProfile; - -public interface AffinityGroupProcessor extends Adapter { - - /** - * process() is called to apply any user preferences to the deployment plan - * and avoid set for the given VM placement. - * - * @param vm - * virtual machine. - * @param plan - * deployment plan that tells you where it's being deployed to. - * @param avoid - * avoid these data centers, pods, clusters, or hosts. - */ - void process(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid) throws AffinityConflictException; - - /** - * getType() should return the affinity/anti-affinity group being - * implemented - * - * @return String Affinity/Anti-affinity type - */ - String getType(); - - /** - * check() is called to see if the planned destination fits the group - * requirements - * - * @param vm - * virtual machine. - * @param plannedDestination - * deployment destination where VM is planned to be deployed - */ - boolean check(VirtualMachineProfile vm, DeployDestination plannedDestination) throws AffinityConflictException; - - /** - * isAdminControlledGroup() should return true if the affinity/anti-affinity - * group can only be operated on[create/delete/modify] by the Admin - * - * @return boolean true/false - */ - boolean isAdminControlledGroup(); - - /** - * canBeSharedDomainWide() should return true if the affinity/anti-affinity - * group can be created for a domain and shared by all accounts under the - * domain. - * - * @return boolean true/false - */ - boolean canBeSharedDomainWide(); - - /** - * subDomainAccess() should return true if the affinity/anti-affinity group - * can be created for a domain and used by the sub-domains. 
If true, all - * accounts under the sub-domains can see this group and use it. - * - * @return boolean true/false - */ - boolean subDomainAccess(); - - void handleDeleteGroup(AffinityGroup group); - +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.affinity; + +import com.cloud.deploy.DeployDestination; +import com.cloud.deploy.DeploymentPlan; +import com.cloud.deploy.DeploymentPlanner.ExcludeList; +import com.cloud.exception.AffinityConflictException; +import com.cloud.utils.component.Adapter; +import com.cloud.vm.VirtualMachineProfile; + +public interface AffinityGroupProcessor extends Adapter { + + /** + * process() is called to apply any user preferences to the deployment plan + * and avoid set for the given VM placement. + * + * @param vm + * virtual machine. + * @param plan + * deployment plan that tells you where it's being deployed to. + * @param avoid + * avoid these data centers, pods, clusters, or hosts. + */ + void process(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid) throws AffinityConflictException; + + /** + * getType() should return the affinity/anti-affinity group being + * implemented + * + * @return String Affinity/Anti-affinity type + */ + String getType(); + + /** + * check() is called to see if the planned destination fits the group + * requirements + * + * @param vm + * virtual machine. + * @param plannedDestination + * deployment destination where VM is planned to be deployed + */ + boolean check(VirtualMachineProfile vm, DeployDestination plannedDestination) throws AffinityConflictException; + + /** + * isAdminControlledGroup() should return true if the affinity/anti-affinity + * group can only be operated on[create/delete/modify] by the Admin + * + * @return boolean true/false + */ + boolean isAdminControlledGroup(); + + /** + * canBeSharedDomainWide() should return true if the affinity/anti-affinity + * group can be created for a domain and shared by all accounts under the + * domain. + * + * @return boolean true/false + */ + boolean canBeSharedDomainWide(); + + /** + * subDomainAccess() should return true if the affinity/anti-affinity group + * can be created for a domain and used by the sub-domains. If true, all + * accounts under the sub-domains can see this group and use it. 
+ * + * @return boolean true/false + */ + boolean subDomainAccess(); + + void handleDeleteGroup(AffinityGroup group); + } \ No newline at end of file diff --git a/api/src/org/apache/cloudstack/affinity/AffinityGroupService.java b/api/src/org/apache/cloudstack/affinity/AffinityGroupService.java index 4f65af4e6c2..8ffc3b29612 100644 --- a/api/src/org/apache/cloudstack/affinity/AffinityGroupService.java +++ b/api/src/org/apache/cloudstack/affinity/AffinityGroupService.java @@ -1,82 +1,82 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package org.apache.cloudstack.affinity; - -import java.util.List; - -import com.cloud.uservm.UserVm; -import com.cloud.utils.Pair; - -public interface AffinityGroupService { - - /** - * Creates an affinity/anti-affinity group for the given account/domain. - * - * @param account - * @param domainId - * @param name - * @param type - * @param description - * @return AffinityGroup - */ - - AffinityGroup createAffinityGroup(String account, Long domainId, String affinityGroupName, String affinityGroupType, String description); - - /** - * Creates an affinity/anti-affinity group. - * - * @param affinityGroupId - * @param account - * @param domainId - * @param affinityGroupName - */ - boolean deleteAffinityGroup(Long affinityGroupId, String account, Long domainId, String affinityGroupName); - - /** Lists Affinity Groups in your account - * @param account - * @param domainId - * @param affinityGroupId - * @param affinityGroupName - * @param affinityGroupType - * @param vmId - * @param startIndex - * @param pageSize - * @return - */ - Pair, Integer> listAffinityGroups(Long affinityGroupId, String affinityGroupName, String affinityGroupType, Long vmId, Long startIndex, - Long pageSize); - - /** - * List group types available in deployment - * - * @return - */ - List listAffinityGroupTypes(); - - AffinityGroup getAffinityGroup(Long groupId); - - UserVm updateVMAffinityGroups(Long vmId, List affinityGroupIds); - - boolean isAffinityGroupProcessorAvailable(String affinityGroupType); - - boolean isAdminControlledGroup(AffinityGroup group); - - boolean isAffinityGroupAvailableInDomain(long affinityGroupId, long domainId); - - AffinityGroup createAffinityGroupInternal(String account, Long domainId, String affinityGroupName, String affinityGroupType, String description); - -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.affinity; + +import java.util.List; + +import com.cloud.uservm.UserVm; +import com.cloud.utils.Pair; + +public interface AffinityGroupService { + + /** + * Creates an affinity/anti-affinity group for the given account/domain. + * + * @param account + * @param domainId + * @param name + * @param type + * @param description + * @return AffinityGroup + */ + + AffinityGroup createAffinityGroup(String account, Long domainId, String affinityGroupName, String affinityGroupType, String description); + + /** + * Creates an affinity/anti-affinity group. + * + * @param affinityGroupId + * @param account + * @param domainId + * @param affinityGroupName + */ + boolean deleteAffinityGroup(Long affinityGroupId, String account, Long domainId, String affinityGroupName); + + /** Lists Affinity Groups in your account + * @param account + * @param domainId + * @param affinityGroupId + * @param affinityGroupName + * @param affinityGroupType + * @param vmId + * @param startIndex + * @param pageSize + * @return + */ + Pair, Integer> listAffinityGroups(Long affinityGroupId, String affinityGroupName, String affinityGroupType, Long vmId, Long startIndex, + Long pageSize); + + /** + * List group types available in deployment + * + * @return + */ + List listAffinityGroupTypes(); + + AffinityGroup getAffinityGroup(Long groupId); + + UserVm updateVMAffinityGroups(Long vmId, List affinityGroupIds); + + boolean isAffinityGroupProcessorAvailable(String affinityGroupType); + + boolean isAdminControlledGroup(AffinityGroup group); + + boolean isAffinityGroupAvailableInDomain(long affinityGroupId, long domainId); + + AffinityGroup createAffinityGroupInternal(String account, Long domainId, String affinityGroupName, String affinityGroupType, String description); + +} diff --git a/api/src/org/apache/cloudstack/affinity/AffinityProcessorBase.java b/api/src/org/apache/cloudstack/affinity/AffinityProcessorBase.java index d4e8e4169b4..d48b5fd7556 100644 --- a/api/src/org/apache/cloudstack/affinity/AffinityProcessorBase.java +++ b/api/src/org/apache/cloudstack/affinity/AffinityProcessorBase.java @@ -1,69 +1,69 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package org.apache.cloudstack.affinity; - -import com.cloud.deploy.DeployDestination; -import com.cloud.deploy.DeploymentPlan; -import com.cloud.deploy.DeploymentPlanner.ExcludeList; -import com.cloud.exception.AffinityConflictException; -import com.cloud.utils.component.AdapterBase; -import com.cloud.vm.VirtualMachineProfile; - -public class AffinityProcessorBase extends AdapterBase implements AffinityGroupProcessor { - - protected String _type; - - @Override - public void process(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid) throws AffinityConflictException { - - } - - @Override - public String getType() { - return _type; - } - - public void setType(String type) { - _type = type; - } - - @Override - public boolean check(VirtualMachineProfile vm, DeployDestination plannedDestination) throws AffinityConflictException { - return true; - } - - @Override - public boolean isAdminControlledGroup() { - return false; - } - - @Override - public boolean canBeSharedDomainWide() { - return false; - } - - @Override - public void handleDeleteGroup(AffinityGroup group) { - // TODO Auto-generated method stub - return; - } - - @Override - public boolean subDomainAccess() { - return false; - } -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.affinity; + +import com.cloud.deploy.DeployDestination; +import com.cloud.deploy.DeploymentPlan; +import com.cloud.deploy.DeploymentPlanner.ExcludeList; +import com.cloud.exception.AffinityConflictException; +import com.cloud.utils.component.AdapterBase; +import com.cloud.vm.VirtualMachineProfile; + +public class AffinityProcessorBase extends AdapterBase implements AffinityGroupProcessor { + + protected String _type; + + @Override + public void process(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid) throws AffinityConflictException { + + } + + @Override + public String getType() { + return _type; + } + + public void setType(String type) { + _type = type; + } + + @Override + public boolean check(VirtualMachineProfile vm, DeployDestination plannedDestination) throws AffinityConflictException { + return true; + } + + @Override + public boolean isAdminControlledGroup() { + return false; + } + + @Override + public boolean canBeSharedDomainWide() { + return false; + } + + @Override + public void handleDeleteGroup(AffinityGroup group) { + // TODO Auto-generated method stub + return; + } + + @Override + public boolean subDomainAccess() { + return false; + } +} diff --git a/api/src/org/apache/cloudstack/api/command/admin/config/ListDeploymentPlannersCmd.java b/api/src/org/apache/cloudstack/api/command/admin/config/ListDeploymentPlannersCmd.java index 237eb7a4f3a..65a4c840c94 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/config/ListDeploymentPlannersCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/config/ListDeploymentPlannersCmd.java @@ -1,69 +1,69 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package org.apache.cloudstack.api.command.admin.config; - -import java.util.ArrayList; -import java.util.List; - -import org.apache.cloudstack.api.APICommand; -import org.apache.cloudstack.api.BaseListCmd; -import org.apache.cloudstack.api.response.DeploymentPlannersResponse; -import org.apache.cloudstack.api.response.ListResponse; -import org.apache.log4j.Logger; - -@APICommand(name = "listDeploymentPlanners", description = "Lists all DeploymentPlanners available.", responseObject = DeploymentPlannersResponse.class) -public class ListDeploymentPlannersCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListDeploymentPlannersCmd.class.getName()); - - private static final String s_name = "listdeploymentplannersresponse"; - - ///////////////////////////////////////////////////// - //////////////// API parameters ///////////////////// - ///////////////////////////////////////////////////// - - ///////////////////////////////////////////////////// - /////////////////// Accessors /////////////////////// - ///////////////////////////////////////////////////// - - ///////////////////////////////////////////////////// - /////////////// API Implementation/////////////////// - ///////////////////////////////////////////////////// - - @Override - public String getCommandName() { - return s_name; - } - - @Override - public void execute() { - List planners = _mgr.listDeploymentPlanners(); - ListResponse response = new ListResponse(); - List plannerResponses = new ArrayList(); - - for (String planner : planners) { - DeploymentPlannersResponse plannerResponse = new DeploymentPlannersResponse(); - plannerResponse.setName(planner); - plannerResponse.setObjectName("deploymentPlanner"); - plannerResponses.add(plannerResponse); - } - - response.setResponses(plannerResponses); - response.setResponseName(getCommandName()); - this.setResponseObject(response); - - } -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.admin.config; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.response.DeploymentPlannersResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.log4j.Logger; + +@APICommand(name = "listDeploymentPlanners", description = "Lists all DeploymentPlanners available.", responseObject = DeploymentPlannersResponse.class) +public class ListDeploymentPlannersCmd extends BaseListCmd { + public static final Logger s_logger = Logger.getLogger(ListDeploymentPlannersCmd.class.getName()); + + private static final String s_name = "listdeploymentplannersresponse"; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public void execute() { + List planners = _mgr.listDeploymentPlanners(); + ListResponse response = new ListResponse(); + List plannerResponses = new ArrayList(); + + for (String planner : planners) { + DeploymentPlannersResponse plannerResponse = new DeploymentPlannersResponse(); + plannerResponse.setName(planner); + plannerResponse.setObjectName("deploymentPlanner"); + plannerResponses.add(plannerResponse); + } + + response.setResponses(plannerResponses); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + + } +} diff --git a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2ModifyInstanceAttribute.java b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2ModifyInstanceAttribute.java index 315584e4a5a..d5dcec0d0a9 100644 --- a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2ModifyInstanceAttribute.java +++ b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2ModifyInstanceAttribute.java @@ -1,63 +1,63 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.bridge.service.core.ec2; - -public class EC2ModifyInstanceAttribute { - private String instanceId; - private String instanceType; - private String userData; - - /** - * @return instanceId - */ - public String getInstanceId() { - return instanceId; - } - - /** - * @param instanceId to set - */ - public void setInstanceId(String instanceId) { - this.instanceId = instanceId; - } - - /** - * @return instanceType - */ - public String getInstanceType() { - return instanceType; - } - - /** - * @param instanceType to set - */ - public void setInstanceType(String instanceType) { - this.instanceType = instanceType; - } - - /** - * @return userData - */ - public String getUserData() { - return userData; - } - - public void setUserData(String userData) { - this.userData = userData; - } - -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.bridge.service.core.ec2; + +public class EC2ModifyInstanceAttribute { + private String instanceId; + private String instanceType; + private String userData; + + /** + * @return instanceId + */ + public String getInstanceId() { + return instanceId; + } + + /** + * @param instanceId to set + */ + public void setInstanceId(String instanceId) { + this.instanceId = instanceId; + } + + /** + * @return instanceType + */ + public String getInstanceType() { + return instanceType; + } + + /** + * @param instanceType to set + */ + public void setInstanceType(String instanceType) { + this.instanceType = instanceType; + } + + /** + * @return userData + */ + public String getUserData() { + return userData; + } + + public void setUserData(String userData) { + this.userData = userData; + } + +} diff --git a/core/src/com/cloud/agent/api/UnregisterVMCommand.java b/core/src/com/cloud/agent/api/UnregisterVMCommand.java index 049e99c77f0..466505f8e8d 100644 --- a/core/src/com/cloud/agent/api/UnregisterVMCommand.java +++ b/core/src/com/cloud/agent/api/UnregisterVMCommand.java @@ -1,34 +1,34 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.agent.api; - -public class UnregisterVMCommand extends Command { - String vmName; - - public UnregisterVMCommand(String vmName) { - this.vmName = vmName; - } - - @Override - public boolean executeInSequence() { - return false; - } - - public String getVmName() { - return vmName; - } -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.agent.api; + +public class UnregisterVMCommand extends Command { + String vmName; + + public UnregisterVMCommand(String vmName) { + this.vmName = vmName; + } + + @Override + public boolean executeInSequence() { + return false; + } + + public String getVmName() { + return vmName; + } +} diff --git a/engine/components-api/src/com/cloud/deploy/DeploymentPlanningManager.java b/engine/components-api/src/com/cloud/deploy/DeploymentPlanningManager.java index 4a053d4e255..b61e89ddcea 100644 --- a/engine/components-api/src/com/cloud/deploy/DeploymentPlanningManager.java +++ b/engine/components-api/src/com/cloud/deploy/DeploymentPlanningManager.java @@ -1,49 +1,49 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.deploy; - -import com.cloud.deploy.DeploymentPlanner.ExcludeList; -import com.cloud.exception.AffinityConflictException; -import com.cloud.exception.InsufficientServerCapacityException; -import com.cloud.utils.component.Manager; -import com.cloud.vm.VirtualMachineProfile; - -public interface DeploymentPlanningManager extends Manager { - - /** - * Manages vm deployment stages: First Process Affinity/Anti-affinity - Call - * the chain of AffinityGroupProcessor adapters to set deploymentplan scope - * and exclude list Secondly, Call DeploymentPlanner - to use heuristics to - * find the best spot to place the vm/volume. Planner will drill down to the - * write set of clusters to look for placement based on various heuristics. - * Lastly, Call Allocators - Given a cluster, allocators matches the - * requirements to capabilities of the physical resource (host, storage - * pool). 
- * - * @throws AffinityConflictException - * - * - * - */ - DeployDestination planDeployment(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids) throws InsufficientServerCapacityException, - AffinityConflictException; - - String finalizeReservation(DeployDestination plannedDestination, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids) - throws InsufficientServerCapacityException, AffinityConflictException; - - void cleanupVMReservations(); -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.deploy; + +import com.cloud.deploy.DeploymentPlanner.ExcludeList; +import com.cloud.exception.AffinityConflictException; +import com.cloud.exception.InsufficientServerCapacityException; +import com.cloud.utils.component.Manager; +import com.cloud.vm.VirtualMachineProfile; + +public interface DeploymentPlanningManager extends Manager { + + /** + * Manages vm deployment stages: First Process Affinity/Anti-affinity - Call + * the chain of AffinityGroupProcessor adapters to set deploymentplan scope + * and exclude list Secondly, Call DeploymentPlanner - to use heuristics to + * find the best spot to place the vm/volume. Planner will drill down to the + * write set of clusters to look for placement based on various heuristics. + * Lastly, Call Allocators - Given a cluster, allocators matches the + * requirements to capabilities of the physical resource (host, storage + * pool). + * + * @throws AffinityConflictException + * + * + * + */ + DeployDestination planDeployment(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids) throws InsufficientServerCapacityException, + AffinityConflictException; + + String finalizeReservation(DeployDestination plannedDestination, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids) + throws InsufficientServerCapacityException, AffinityConflictException; + + void cleanupVMReservations(); +} diff --git a/framework/config/src/org/apache/cloudstack/framework/config/ConfigDepot.java b/framework/config/src/org/apache/cloudstack/framework/config/ConfigDepot.java index 4e02eb18f5f..8592745c7e7 100644 --- a/framework/config/src/org/apache/cloudstack/framework/config/ConfigDepot.java +++ b/framework/config/src/org/apache/cloudstack/framework/config/ConfigDepot.java @@ -1,30 +1,30 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package org.apache.cloudstack.framework.config; - -import java.util.List; - -/** - * ConfigDepot is a repository of configurations. - * - */ -public interface ConfigDepot { - - ConfigKey get(String paramName); - - List> getConfigListByScope(String scope); -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.framework.config; + +import java.util.List; + +/** + * ConfigDepot is a repository of configurations. + * + */ +public interface ConfigDepot { + + ConfigKey get(String paramName); + + List> getConfigListByScope(String scope); +} diff --git a/framework/config/src/org/apache/cloudstack/framework/config/ConfigKey.java b/framework/config/src/org/apache/cloudstack/framework/config/ConfigKey.java index ea93feef093..879d772f99a 100644 --- a/framework/config/src/org/apache/cloudstack/framework/config/ConfigKey.java +++ b/framework/config/src/org/apache/cloudstack/framework/config/ConfigKey.java @@ -1,187 +1,187 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package org.apache.cloudstack.framework.config; - -import java.sql.Date; - -import org.apache.cloudstack.framework.config.impl.ConfigDepotImpl; -import org.apache.cloudstack.framework.config.impl.ConfigurationVO; - -import com.cloud.utils.exception.CloudRuntimeException; - -/** - * ConfigKey supplants the original Config.java. It is just a class - * declaration where others can declare their config variables. 
- * - */ -public class ConfigKey { - - public static enum Scope { - Global, Zone, Cluster, StoragePool, Account, ManagementServer - } - - private final String _category; - - public String category() { - return _category; - } - - public Class type() { - return _type; - } - - public final String key() { - return _name; - } - - public String defaultValue() { - return _defaultValue; - } - - public String description() { - return _description; - } - - public Scope scope() { - return _scope; - } - - public boolean isDynamic() { - return _isDynamic; - } - - @Override - public String toString() { - return _name; - } - - private final Class _type; - private final String _name; - private final String _defaultValue; - private final String _description; - private final Scope _scope; // Parameter can be at different levels (Zone/cluster/pool/account), by default every parameter is at global - private final boolean _isDynamic; - private final T _multiplier; - T _value = null; - - static ConfigDepotImpl s_depot = null; - - static public void init(ConfigDepotImpl depot) { - s_depot = depot; - } - - public ConfigKey(String category, Class type, String name, String defaultValue, String description, boolean isDynamic, Scope scope) { - this(type, name, category, defaultValue, description, isDynamic, scope, null); - } - - public ConfigKey(String category, Class type, String name, String defaultValue, String description, boolean isDynamic) { - this(type, name, category, defaultValue, description, isDynamic, Scope.Global, null); - } - - public ConfigKey(Class type, String name, String category, String defaultValue, String description, boolean isDynamic, Scope scope, T multiplier) { - _category = category; - _type = type; - _name = name; - _defaultValue = defaultValue; - _description = description; - _scope = scope; - _isDynamic = isDynamic; - _multiplier = multiplier; - } - - @Deprecated - public ConfigKey(Class type, String name, String category, String defaultValue, String description, boolean isDynamic) { - this(type, name, category, defaultValue, description, isDynamic, Scope.Global, null); - } - - public T multiplier() { - return _multiplier; - } - - @Override - public int hashCode() { - return _name.hashCode(); - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof ConfigKey) { - ConfigKey that = (ConfigKey)obj; - return this._name.equals(that._name); - } else if (obj instanceof String) { - String key = (String)obj; - return key.equals(_name); - } - - throw new CloudRuntimeException("Comparing ConfigKey to " + obj.toString()); - } - - public T value() { - if (_value == null || isDynamic()) { - ConfigurationVO vo = s_depot != null ? s_depot.global().findById(key()) : null; - _value = valueOf(vo != null ? vo.getValue() : defaultValue()); - } - - return _value; - } - - public T valueIn(Long id) { - if (id == null) { - return value(); - } - - String value = s_depot != null ? 
s_depot.scoped(this).getConfigValue(id, this) : null; - if (value == null) { - return value(); - } else { - return valueOf(value); - } - } - - @SuppressWarnings("unchecked") - protected T valueOf(String value) { - Number multiplier = 1; - if (multiplier() != null) { - multiplier = (Number)multiplier(); - } - Class type = type(); - if (type.isAssignableFrom(Boolean.class)) { - return (T)Boolean.valueOf(value); - } else if (type.isAssignableFrom(Integer.class)) { - return (T)new Integer(Integer.parseInt(value) * multiplier.intValue()); - } else if (type.isAssignableFrom(Long.class)) { - return (T)new Long(Long.parseLong(value) * multiplier.longValue()); - } else if (type.isAssignableFrom(Short.class)) { - return (T)new Short(Short.parseShort(value)); - } else if (type.isAssignableFrom(String.class)) { - return (T)value; - } else if (type.isAssignableFrom(Float.class)) { - return (T)new Float(Float.parseFloat(value) * multiplier.floatValue()); - } else if (type.isAssignableFrom(Double.class)) { - return (T)new Double(Double.parseDouble(value) * multiplier.doubleValue()); - } else if (type.isAssignableFrom(String.class)) { - return (T)value; - } else if (type.isAssignableFrom(Date.class)) { - return (T)Date.valueOf(value); - } else if (type.isAssignableFrom(Character.class)) { - return (T)new Character(value.charAt(0)); - } else { - throw new CloudRuntimeException("Unsupported data type for config values: " + type); - } - } - -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.framework.config; + +import java.sql.Date; + +import org.apache.cloudstack.framework.config.impl.ConfigDepotImpl; +import org.apache.cloudstack.framework.config.impl.ConfigurationVO; + +import com.cloud.utils.exception.CloudRuntimeException; + +/** + * ConfigKey supplants the original Config.java. It is just a class + * declaration where others can declare their config variables. 
+ * + */ +public class ConfigKey { + + public static enum Scope { + Global, Zone, Cluster, StoragePool, Account, ManagementServer + } + + private final String _category; + + public String category() { + return _category; + } + + public Class type() { + return _type; + } + + public final String key() { + return _name; + } + + public String defaultValue() { + return _defaultValue; + } + + public String description() { + return _description; + } + + public Scope scope() { + return _scope; + } + + public boolean isDynamic() { + return _isDynamic; + } + + @Override + public String toString() { + return _name; + } + + private final Class _type; + private final String _name; + private final String _defaultValue; + private final String _description; + private final Scope _scope; // Parameter can be at different levels (Zone/cluster/pool/account), by default every parameter is at global + private final boolean _isDynamic; + private final T _multiplier; + T _value = null; + + static ConfigDepotImpl s_depot = null; + + static public void init(ConfigDepotImpl depot) { + s_depot = depot; + } + + public ConfigKey(String category, Class type, String name, String defaultValue, String description, boolean isDynamic, Scope scope) { + this(type, name, category, defaultValue, description, isDynamic, scope, null); + } + + public ConfigKey(String category, Class type, String name, String defaultValue, String description, boolean isDynamic) { + this(type, name, category, defaultValue, description, isDynamic, Scope.Global, null); + } + + public ConfigKey(Class type, String name, String category, String defaultValue, String description, boolean isDynamic, Scope scope, T multiplier) { + _category = category; + _type = type; + _name = name; + _defaultValue = defaultValue; + _description = description; + _scope = scope; + _isDynamic = isDynamic; + _multiplier = multiplier; + } + + @Deprecated + public ConfigKey(Class type, String name, String category, String defaultValue, String description, boolean isDynamic) { + this(type, name, category, defaultValue, description, isDynamic, Scope.Global, null); + } + + public T multiplier() { + return _multiplier; + } + + @Override + public int hashCode() { + return _name.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof ConfigKey) { + ConfigKey that = (ConfigKey)obj; + return this._name.equals(that._name); + } else if (obj instanceof String) { + String key = (String)obj; + return key.equals(_name); + } + + throw new CloudRuntimeException("Comparing ConfigKey to " + obj.toString()); + } + + public T value() { + if (_value == null || isDynamic()) { + ConfigurationVO vo = s_depot != null ? s_depot.global().findById(key()) : null; + _value = valueOf(vo != null ? vo.getValue() : defaultValue()); + } + + return _value; + } + + public T valueIn(Long id) { + if (id == null) { + return value(); + } + + String value = s_depot != null ? 
s_depot.scoped(this).getConfigValue(id, this) : null; + if (value == null) { + return value(); + } else { + return valueOf(value); + } + } + + @SuppressWarnings("unchecked") + protected T valueOf(String value) { + Number multiplier = 1; + if (multiplier() != null) { + multiplier = (Number)multiplier(); + } + Class type = type(); + if (type.isAssignableFrom(Boolean.class)) { + return (T)Boolean.valueOf(value); + } else if (type.isAssignableFrom(Integer.class)) { + return (T)new Integer(Integer.parseInt(value) * multiplier.intValue()); + } else if (type.isAssignableFrom(Long.class)) { + return (T)new Long(Long.parseLong(value) * multiplier.longValue()); + } else if (type.isAssignableFrom(Short.class)) { + return (T)new Short(Short.parseShort(value)); + } else if (type.isAssignableFrom(String.class)) { + return (T)value; + } else if (type.isAssignableFrom(Float.class)) { + return (T)new Float(Float.parseFloat(value) * multiplier.floatValue()); + } else if (type.isAssignableFrom(Double.class)) { + return (T)new Double(Double.parseDouble(value) * multiplier.doubleValue()); + } else if (type.isAssignableFrom(String.class)) { + return (T)value; + } else if (type.isAssignableFrom(Date.class)) { + return (T)Date.valueOf(value); + } else if (type.isAssignableFrom(Character.class)) { + return (T)new Character(value.charAt(0)); + } else { + throw new CloudRuntimeException("Unsupported data type for config values: " + type); + } + } + +} diff --git a/plugins/affinity-group-processors/host-anti-affinity/src/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java b/plugins/affinity-group-processors/host-anti-affinity/src/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java index 712f6b2e97a..5d8d12f4ad4 100644 --- a/plugins/affinity-group-processors/host-anti-affinity/src/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java +++ b/plugins/affinity-group-processors/host-anti-affinity/src/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java @@ -1,149 +1,149 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
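For orientation while reading the ConfigKey diff above, here is a minimal usage sketch; it assumes the generic form ConfigKey<T> that the class declares, and the category, key name, default value and the MyFeatureManagerImpl class are all hypothetical.

import org.apache.cloudstack.framework.config.ConfigKey;

public class MyFeatureManagerImpl {
    // Hypothetical zone-scoped, dynamic key: "Advanced" category, Integer type, default "60".
    static final ConfigKey<Integer> MyFeatureTimeout = new ConfigKey<Integer>("Advanced", Integer.class,
            "myfeature.timeout", "60", "Timeout in seconds for the myfeature task", true, ConfigKey.Scope.Zone);

    void runTask(Long zoneId) {
        // value() resolves the global setting (or the declared default); because the key is
        // dynamic it is re-read on each call instead of being cached.
        int globalTimeout = MyFeatureTimeout.value();
        // valueIn() consults the zone-scoped setting and falls back to value() when unset.
        Integer zoneTimeout = MyFeatureTimeout.valueIn(zoneId);
    }
}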
-package org.apache.cloudstack.affinity; - -import java.util.List; -import java.util.Map; - -import javax.ejb.Local; -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.cloudstack.affinity.dao.AffinityGroupDao; -import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; -import org.apache.cloudstack.engine.cloud.entity.api.db.VMReservationVO; -import org.apache.cloudstack.engine.cloud.entity.api.db.dao.VMReservationDao; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.framework.messagebus.MessageSubscriber; - -import org.apache.log4j.Logger; - -import com.cloud.configuration.Config; -import com.cloud.deploy.DeployDestination; -import com.cloud.deploy.DeploymentPlan; -import com.cloud.deploy.DeploymentPlanner.ExcludeList; -import com.cloud.exception.AffinityConflictException; -import com.cloud.utils.DateUtil; -import com.cloud.utils.NumbersUtil; -import com.cloud.utils.db.DB; -import com.cloud.utils.db.Transaction; -import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.VirtualMachine; -import com.cloud.vm.VirtualMachineProfile; -import com.cloud.vm.dao.UserVmDao; -import com.cloud.vm.dao.VMInstanceDao; - -@Local(value = AffinityGroupProcessor.class) -public class HostAntiAffinityProcessor extends AffinityProcessorBase implements AffinityGroupProcessor { - - private static final Logger s_logger = Logger.getLogger(HostAntiAffinityProcessor.class); - @Inject - protected UserVmDao _vmDao; - @Inject - protected VMInstanceDao _vmInstanceDao; - @Inject - protected AffinityGroupDao _affinityGroupDao; - @Inject - protected AffinityGroupVMMapDao _affinityGroupVMMapDao; - private int _vmCapacityReleaseInterval; - @Inject - protected ConfigurationDao _configDao; - - @Inject - protected VMReservationDao _reservationDao; - - @Override - public void process(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid) throws AffinityConflictException { - VirtualMachine vm = vmProfile.getVirtualMachine(); - List vmGroupMappings = _affinityGroupVMMapDao.findByVmIdType(vm.getId(), getType()); - - for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) { - if (vmGroupMapping != null) { - AffinityGroupVO group = _affinityGroupDao.findById(vmGroupMapping.getAffinityGroupId()); - - if (s_logger.isDebugEnabled()) { - s_logger.debug("Processing affinity group " + group.getName() + " for VM Id: " + vm.getId()); - } - - List groupVMIds = _affinityGroupVMMapDao.listVmIdsByAffinityGroup(group.getId()); - groupVMIds.remove(vm.getId()); - - for (Long groupVMId : groupVMIds) { - VMInstanceVO groupVM = _vmInstanceDao.findById(groupVMId); - if (groupVM != null && !groupVM.isRemoved()) { - if (groupVM.getHostId() != null) { - avoid.addHost(groupVM.getHostId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Added host " + groupVM.getHostId() + " to avoid set, since VM " + groupVM.getId() + " is present on the host"); - } - } else if (VirtualMachine.State.Stopped.equals(groupVM.getState()) && groupVM.getLastHostId() != null) { - long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - groupVM.getUpdateTime().getTime()) / 1000; - if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) { - avoid.addHost(groupVM.getLastHostId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Added host " + groupVM.getLastHostId() + " to avoid set, since VM " + groupVM.getId() + - " is present on the host, in Stopped state but has reserved capacity"); - } - } - } - } - } - } - } - - } - - @Override - public 
boolean configure(final String name, final Map params) throws ConfigurationException { - super.configure(name, params); - _vmCapacityReleaseInterval = NumbersUtil.parseInt(_configDao.getValue(Config.CapacitySkipcountingHours.key()), 3600); - return true; - } - - @Override - public boolean check(VirtualMachineProfile vmProfile, DeployDestination plannedDestination) throws AffinityConflictException { - - if (plannedDestination.getHost() == null) { - return true; - } - long plannedHostId = plannedDestination.getHost().getId(); - - VirtualMachine vm = vmProfile.getVirtualMachine(); - - List vmGroupMappings = _affinityGroupVMMapDao.findByVmIdType(vm.getId(), getType()); - - for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) { - // if more than 1 VM's are present in the group then check for - // conflict due to parallel deployment - List groupVMIds = _affinityGroupVMMapDao.listVmIdsByAffinityGroup(vmGroupMapping.getAffinityGroupId()); - groupVMIds.remove(vm.getId()); - - for (Long groupVMId : groupVMIds) { - VMReservationVO vmReservation = _reservationDao.findByVmId(groupVMId); - if (vmReservation != null && vmReservation.getHostId() != null && vmReservation.getHostId().equals(plannedHostId)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Planned destination for VM " + vm.getId() + " conflicts with an existing VM " + vmReservation.getVmId() + " reserved on the same host " + - plannedHostId); - } - return false; - } - } - } - return true; - } - -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
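As a reading aid for HostAntiAffinityProcessor.process() above, a condensed sketch of the per-peer rule it applies while building the avoid set; AntiAffinityAvoidRule, peer and releaseIntervalSecs are illustrative stand-ins for the loop variables and _vmCapacityReleaseInterval.

import com.cloud.utils.DateUtil;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachine;

final class AntiAffinityAvoidRule {
    // True when this peer VM should keep the new VM off its (current or last) host.
    static boolean blocksItsHost(VMInstanceVO peer, int releaseIntervalSecs) {
        if (peer == null || peer.isRemoved()) {
            return false;
        }
        if (peer.getHostId() != null) {
            return true; // peer is currently placed on a host
        }
        if (VirtualMachine.State.Stopped.equals(peer.getState()) && peer.getLastHostId() != null) {
            long idleSecs = (DateUtil.currentGMTTime().getTime() - peer.getUpdateTime().getTime()) / 1000;
            return idleSecs < releaseIntervalSecs; // capacity is still reserved on the last host
        }
        return false;
    }
}

In the method itself, the host added to the ExcludeList is peer.getHostId() in the running case and peer.getLastHostId() in the stopped-but-reserved case.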
+package org.apache.cloudstack.affinity; + +import java.util.List; +import java.util.Map; + +import javax.ejb.Local; +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.affinity.dao.AffinityGroupDao; +import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; +import org.apache.cloudstack.engine.cloud.entity.api.db.VMReservationVO; +import org.apache.cloudstack.engine.cloud.entity.api.db.dao.VMReservationDao; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.messagebus.MessageSubscriber; + +import org.apache.log4j.Logger; + +import com.cloud.configuration.Config; +import com.cloud.deploy.DeployDestination; +import com.cloud.deploy.DeploymentPlan; +import com.cloud.deploy.DeploymentPlanner.ExcludeList; +import com.cloud.exception.AffinityConflictException; +import com.cloud.utils.DateUtil; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Transaction; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineProfile; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDao; + +@Local(value = AffinityGroupProcessor.class) +public class HostAntiAffinityProcessor extends AffinityProcessorBase implements AffinityGroupProcessor { + + private static final Logger s_logger = Logger.getLogger(HostAntiAffinityProcessor.class); + @Inject + protected UserVmDao _vmDao; + @Inject + protected VMInstanceDao _vmInstanceDao; + @Inject + protected AffinityGroupDao _affinityGroupDao; + @Inject + protected AffinityGroupVMMapDao _affinityGroupVMMapDao; + private int _vmCapacityReleaseInterval; + @Inject + protected ConfigurationDao _configDao; + + @Inject + protected VMReservationDao _reservationDao; + + @Override + public void process(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid) throws AffinityConflictException { + VirtualMachine vm = vmProfile.getVirtualMachine(); + List vmGroupMappings = _affinityGroupVMMapDao.findByVmIdType(vm.getId(), getType()); + + for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) { + if (vmGroupMapping != null) { + AffinityGroupVO group = _affinityGroupDao.findById(vmGroupMapping.getAffinityGroupId()); + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Processing affinity group " + group.getName() + " for VM Id: " + vm.getId()); + } + + List groupVMIds = _affinityGroupVMMapDao.listVmIdsByAffinityGroup(group.getId()); + groupVMIds.remove(vm.getId()); + + for (Long groupVMId : groupVMIds) { + VMInstanceVO groupVM = _vmInstanceDao.findById(groupVMId); + if (groupVM != null && !groupVM.isRemoved()) { + if (groupVM.getHostId() != null) { + avoid.addHost(groupVM.getHostId()); + if (s_logger.isDebugEnabled()) { + s_logger.debug("Added host " + groupVM.getHostId() + " to avoid set, since VM " + groupVM.getId() + " is present on the host"); + } + } else if (VirtualMachine.State.Stopped.equals(groupVM.getState()) && groupVM.getLastHostId() != null) { + long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - groupVM.getUpdateTime().getTime()) / 1000; + if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) { + avoid.addHost(groupVM.getLastHostId()); + if (s_logger.isDebugEnabled()) { + s_logger.debug("Added host " + groupVM.getLastHostId() + " to avoid set, since VM " + groupVM.getId() + + " is present on the host, in Stopped state but has reserved capacity"); + } + } + } + } + } + } + } + + } + + @Override + public 
boolean configure(final String name, final Map params) throws ConfigurationException { + super.configure(name, params); + _vmCapacityReleaseInterval = NumbersUtil.parseInt(_configDao.getValue(Config.CapacitySkipcountingHours.key()), 3600); + return true; + } + + @Override + public boolean check(VirtualMachineProfile vmProfile, DeployDestination plannedDestination) throws AffinityConflictException { + + if (plannedDestination.getHost() == null) { + return true; + } + long plannedHostId = plannedDestination.getHost().getId(); + + VirtualMachine vm = vmProfile.getVirtualMachine(); + + List vmGroupMappings = _affinityGroupVMMapDao.findByVmIdType(vm.getId(), getType()); + + for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) { + // if more than 1 VM's are present in the group then check for + // conflict due to parallel deployment + List groupVMIds = _affinityGroupVMMapDao.listVmIdsByAffinityGroup(vmGroupMapping.getAffinityGroupId()); + groupVMIds.remove(vm.getId()); + + for (Long groupVMId : groupVMIds) { + VMReservationVO vmReservation = _reservationDao.findByVmId(groupVMId); + if (vmReservation != null && vmReservation.getHostId() != null && vmReservation.getHostId().equals(plannedHostId)) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Planned destination for VM " + vm.getId() + " conflicts with an existing VM " + vmReservation.getVmId() + " reserved on the same host " + + plannedHostId); + } + return false; + } + } + } + return true; + } + +} diff --git a/pom.xml b/pom.xml index 80a572b853e..71d052fe8c2 100644 --- a/pom.xml +++ b/pom.xml @@ -445,7 +445,6 @@ ${basedir}/${cs.target.dir}/classes ${basedir}/${cs.target.dir}/test-classes - diff --git a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java index 4adce4dbc9b..cdaba75c0ed 100644 --- a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -1,1327 +1,1327 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
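In the same spirit, the parallel-deployment guard in HostAntiAffinityProcessor.check() above reduces to the following test: a planned host is rejected when any peer VM in the group already holds a reservation on it. The helper class and method names are illustrative.

import org.apache.cloudstack.engine.cloud.entity.api.db.VMReservationVO;

final class AntiAffinityConflictRule {
    // True when the peer's reservation pins it to the host chosen for the new VM.
    static boolean conflicts(VMReservationVO peerReservation, long plannedHostId) {
        return peerReservation != null
                && peerReservation.getHostId() != null
                && peerReservation.getHostId().equals(plannedHostId);
    }
}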
-package com.cloud.deploy; - -import java.util.ArrayList; -import java.util.Comparator; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Timer; -import java.util.TreeSet; - -import javax.ejb.Local; -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.cloudstack.affinity.AffinityGroupProcessor; -import org.apache.cloudstack.affinity.AffinityGroupService; -import org.apache.cloudstack.affinity.AffinityGroupVMMapVO; -import org.apache.cloudstack.affinity.AffinityGroupVO; -import org.apache.cloudstack.affinity.dao.AffinityGroupDao; -import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; -import org.apache.cloudstack.engine.cloud.entity.api.db.VMReservationVO; -import org.apache.cloudstack.engine.cloud.entity.api.db.dao.VMReservationDao; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.framework.messagebus.MessageBus; -import org.apache.cloudstack.framework.messagebus.MessageSubscriber; -import org.apache.cloudstack.managed.context.ManagedContextTimerTask; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.utils.identity.ManagementServerNode; -import org.apache.log4j.Logger; - -import com.cloud.capacity.CapacityManager; -import com.cloud.capacity.dao.CapacityDao; -import com.cloud.configuration.Config; -import com.cloud.dc.ClusterDetailsDao; -import com.cloud.dc.ClusterDetailsVO; -import com.cloud.dc.ClusterVO; -import com.cloud.dc.DataCenter; -import com.cloud.dc.DataCenterVO; -import com.cloud.dc.DedicatedResourceVO; -import com.cloud.dc.Pod; -import com.cloud.dc.dao.ClusterDao; -import com.cloud.dc.dao.DataCenterDao; -import com.cloud.dc.dao.DedicatedResourceDao; -import com.cloud.dc.dao.HostPodDao; -import com.cloud.deploy.DeploymentPlanner.ExcludeList; -import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage; -import com.cloud.deploy.dao.PlannerHostReservationDao; -import com.cloud.exception.AffinityConflictException; -import com.cloud.exception.ConnectionException; -import com.cloud.exception.InsufficientServerCapacityException; -import com.cloud.exception.PermissionDeniedException; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.host.Status; -import com.cloud.host.dao.HostDao; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.offering.ServiceOffering; -import com.cloud.org.Cluster; -import com.cloud.org.Grouping; -import com.cloud.resource.ResourceState; -import com.cloud.storage.DiskOfferingVO; -import com.cloud.storage.ScopeType; -import com.cloud.storage.StorageManager; -import com.cloud.storage.StoragePool; -import com.cloud.storage.StoragePoolHostVO; -import com.cloud.storage.Volume; -import com.cloud.storage.VolumeVO; -import com.cloud.storage.dao.DiskOfferingDao; -import com.cloud.storage.dao.GuestOSCategoryDao; -import com.cloud.storage.dao.GuestOSDao; -import com.cloud.storage.dao.StoragePoolHostDao; -import com.cloud.storage.dao.VolumeDao; -import com.cloud.user.AccountManager; -import com.cloud.utils.DateUtil; -import com.cloud.utils.NumbersUtil; -import com.cloud.utils.Pair; -import com.cloud.utils.component.Manager; -import 
com.cloud.utils.component.ManagerBase; -import com.cloud.utils.db.DB; -import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; -import com.cloud.utils.db.TransactionCallback; -import com.cloud.utils.db.TransactionCallbackNoReturn; -import com.cloud.utils.db.TransactionStatus; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.fsm.StateListener; -import com.cloud.vm.DiskProfile; -import com.cloud.vm.ReservationContext; -import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.VirtualMachine; -import com.cloud.vm.VirtualMachine.Event; -import com.cloud.vm.VirtualMachineProfile; -import com.cloud.vm.VirtualMachine.State; -import com.cloud.vm.dao.UserVmDao; -import com.cloud.vm.dao.VMInstanceDao; -import com.cloud.agent.AgentManager; -import com.cloud.agent.Listener; -import com.cloud.agent.api.AgentControlAnswer; -import com.cloud.agent.api.AgentControlCommand; -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.Command; -import com.cloud.agent.api.StartupCommand; -import com.cloud.agent.api.StartupRoutingCommand; -import com.cloud.agent.manager.allocator.HostAllocator; - -@Local(value = {DeploymentPlanningManager.class}) -public class DeploymentPlanningManagerImpl extends ManagerBase implements DeploymentPlanningManager, Manager, Listener, StateListener { - - private static final Logger s_logger = Logger.getLogger(DeploymentPlanningManagerImpl.class); - @Inject - AgentManager _agentMgr; - @Inject - protected UserVmDao _vmDao; - @Inject - protected VMInstanceDao _vmInstanceDao; - @Inject - protected AffinityGroupDao _affinityGroupDao; - @Inject - protected AffinityGroupVMMapDao _affinityGroupVMMapDao; - @Inject - AffinityGroupService _affinityGroupService; - @Inject - DataCenterDao _dcDao; - @Inject - PlannerHostReservationDao _plannerHostReserveDao; - private int _vmCapacityReleaseInterval; - @Inject - MessageBus _messageBus; - private Timer _timer = null; - private long _hostReservationReleasePeriod = 60L * 60L * 1000L; // one hour by default - @Inject - protected VMReservationDao _reservationDao; - - private static final long INITIAL_RESERVATION_RELEASE_CHECKER_DELAY = 30L * 1000L; // thirty seconds expressed in milliseconds - protected long _nodeId = -1; - - protected List _storagePoolAllocators; - - public List getStoragePoolAllocators() { - return _storagePoolAllocators; - } - - public void setStoragePoolAllocators(List _storagePoolAllocators) { - this._storagePoolAllocators = _storagePoolAllocators; - } - - protected List _hostAllocators; - - public List getHostAllocators() { - return _hostAllocators; - } - - public void setHostAllocators(List _hostAllocators) { - this._hostAllocators = _hostAllocators; - } - - @Inject - protected HostDao _hostDao; - @Inject - protected HostPodDao _podDao; - @Inject - protected ClusterDao _clusterDao; - @Inject - protected DedicatedResourceDao _dedicatedDao; - @Inject - protected GuestOSDao _guestOSDao = null; - @Inject - protected GuestOSCategoryDao _guestOSCategoryDao = null; - @Inject - protected DiskOfferingDao _diskOfferingDao; - @Inject - protected StoragePoolHostDao _poolHostDao; - - @Inject - protected VolumeDao _volsDao; - @Inject - protected CapacityManager _capacityMgr; - @Inject - protected ConfigurationDao _configDao; - @Inject - protected PrimaryDataStoreDao _storagePoolDao; - @Inject - protected CapacityDao _capacityDao; - @Inject - protected AccountManager _accountMgr; - @Inject - protected StorageManager _storageMgr; - @Inject - DataStoreManager dataStoreMgr; - @Inject - 
protected ClusterDetailsDao _clusterDetailsDao; - - protected List _planners; - - public List getPlanners() { - return _planners; - } - - public void setPlanners(List _planners) { - this._planners = _planners; - } - - protected List _affinityProcessors; - - public List getAffinityGroupProcessors() { - return _affinityProcessors; - } - - public void setAffinityGroupProcessors(List affinityProcessors) { - this._affinityProcessors = affinityProcessors; - } - - @Override - public DeployDestination planDeployment(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids) throws InsufficientServerCapacityException, - AffinityConflictException { - - // call affinitygroup chain - VirtualMachine vm = vmProfile.getVirtualMachine(); - long vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId()); - DataCenter dc = _dcDao.findById(vm.getDataCenterId()); - - if (vmGroupCount > 0) { - for (AffinityGroupProcessor processor : _affinityProcessors) { - processor.process(vmProfile, plan, avoids); - } - } - - if (vm.getType() == VirtualMachine.Type.User) { - checkForNonDedicatedResources(vmProfile, dc, avoids); - } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid()); - } - - // call planners - //DataCenter dc = _dcDao.findById(vm.getDataCenterId()); - // check if datacenter is in avoid set - if (avoids.shouldAvoid(dc)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("DataCenter id = '" + dc.getId() + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning."); - } - return null; - } - - ServiceOffering offering = vmProfile.getServiceOffering(); - String plannerName = offering.getDeploymentPlanner(); - if (plannerName == null) { - if (vm.getHypervisorType() == HypervisorType.BareMetal) { - plannerName = "BareMetalPlanner"; - } else { - plannerName = _configDao.getValue(Config.VmDeploymentPlanner.key()); - } - } - DeploymentPlanner planner = null; - for (DeploymentPlanner plannerInList : _planners) { - if (plannerName.equals(plannerInList.getName())) { - planner = plannerInList; - break; - } - } - - int cpu_requested = offering.getCpu() * offering.getSpeed(); - long ram_requested = offering.getRamSize() * 1024L * 1024L; - - if (s_logger.isDebugEnabled()) { - s_logger.debug("DeploymentPlanner allocation algorithm: " + planner); - - s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + plan.getClusterId() + - ", requested cpu: " + cpu_requested + ", requested ram: " + ram_requested); - - s_logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId() != null ? 
"Yes" : "No")); - } - - String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag); - - if (plan.getHostId() != null && haVmTag == null) { - Long hostIdSpecified = plan.getHostId(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("DeploymentPlan has host_id specified, choosing this host and making no checks on this host: " + hostIdSpecified); - } - HostVO host = _hostDao.findById(hostIdSpecified); - if (host == null) { - s_logger.debug("The specified host cannot be found"); - } else if (avoids.shouldAvoid(host)) { - s_logger.debug("The specified host is in avoid set"); - } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Looking for suitable pools for this host under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " + - host.getClusterId()); - } - - // search for storage under the zone, pod, cluster of the host. - DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), hostIdSpecified, plan.getPoolId(), null, - plan.getReservationContext()); - - Pair>, List> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL); - Map> suitableVolumeStoragePools = result.first(); - List readyAndReusedVolumes = result.second(); - - // choose the potential pool for this VM for this host - if (!suitableVolumeStoragePools.isEmpty()) { - List suitableHosts = new ArrayList(); - suitableHosts.add(host); - Pair> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoids, - getPlannerUsage(planner, vmProfile, plan, avoids)); - if (potentialResources != null) { - Pod pod = _podDao.findById(host.getPodId()); - Cluster cluster = _clusterDao.findById(host.getClusterId()); - Map storageVolMap = potentialResources.second(); - // remove the reused vol<->pool from destination, since - // we don't have to prepare this volume. 
- for (Volume vol : readyAndReusedVolumes) { - storageVolMap.remove(vol); - } - DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap); - s_logger.debug("Returning Deployment Destination: " + dest); - return dest; - } - } - } - s_logger.debug("Cannnot deploy to specified host, returning."); - return null; - } - - if (vm.getLastHostId() != null && haVmTag == null) { - s_logger.debug("This VM has last host_id specified, trying to choose the same host: " + vm.getLastHostId()); - - HostVO host = _hostDao.findById(vm.getLastHostId()); - if (host == null) { - s_logger.debug("The last host of this VM cannot be found"); - } else if (avoids.shouldAvoid(host)) { - s_logger.debug("The last host of this VM is in avoid set"); - } else if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) { - s_logger.debug("The last Host, hostId: " + host.getId() + " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts"); - } else { - if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) { - long cluster_id = host.getClusterId(); - ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio"); - ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio"); - Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); - Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); - if (_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true)) { - s_logger.debug("The last host of this VM is UP and has enough capacity"); - s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId()); - // search for storage under the zone, pod, cluster of - // the last host. - DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null); - Pair>, List> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL); - Map> suitableVolumeStoragePools = result.first(); - List readyAndReusedVolumes = result.second(); - - // choose the potential pool for this VM for this host - if (!suitableVolumeStoragePools.isEmpty()) { - List suitableHosts = new ArrayList(); - suitableHosts.add(host); - Pair> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoids, - getPlannerUsage(planner, vmProfile, plan, avoids)); - if (potentialResources != null) { - Pod pod = _podDao.findById(host.getPodId()); - Cluster cluster = _clusterDao.findById(host.getClusterId()); - Map storageVolMap = potentialResources.second(); - // remove the reused vol<->pool from - // destination, since we don't have to prepare - // this volume. 
- for (Volume vol : readyAndReusedVolumes) { - storageVolMap.remove(vol); - } - DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap); - s_logger.debug("Returning Deployment Destination: " + dest); - return dest; - } - } - } else { - s_logger.debug("The last host of this VM does not have enough capacity"); - } - } else { - s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + host.getStatus().name() + ", host resource state is: " + - host.getResourceState()); - } - } - s_logger.debug("Cannot choose the last host to deploy this VM "); - } - - DeployDestination dest = null; - List clusterList = null; - - if (planner != null && planner.canHandle(vmProfile, plan, avoids)) { - while (true) { - - if (planner instanceof DeploymentClusterPlanner) { - - ExcludeList plannerAvoidInput = new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), - avoids.getPoolsToAvoid()); - - clusterList = ((DeploymentClusterPlanner)planner).orderClusters(vmProfile, plan, avoids); - - if (clusterList != null && !clusterList.isEmpty()) { - // planner refactoring. call allocators to list hosts - ExcludeList plannerAvoidOutput = new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), - avoids.getHostsToAvoid(), avoids.getPoolsToAvoid()); - - resetAvoidSet(plannerAvoidOutput, plannerAvoidInput); - - dest = checkClustersforDestination(clusterList, vmProfile, plan, avoids, dc, getPlannerUsage(planner, vmProfile, plan, avoids), plannerAvoidOutput); - if (dest != null) { - return dest; - } - // reset the avoid input to the planners - resetAvoidSet(avoids, plannerAvoidOutput); - - } else { - return null; - } - } else { - dest = planner.plan(vmProfile, plan, avoids); - if (dest != null) { - long hostId = dest.getHost().getId(); - avoids.addHost(dest.getHost().getId()); - - if (checkIfHostFitsPlannerUsage(hostId, DeploymentPlanner.PlannerResourceUsage.Shared)) { - // found destination - return dest; - } else { - // find another host - seems some concurrent - // deployment picked it up for dedicated access - continue; - } - } else { - return null; - } - } - } - } - - return dest; - } - - private void checkForNonDedicatedResources(VirtualMachineProfile vmProfile, DataCenter dc, ExcludeList avoids) { - boolean isExplicit = false; - VirtualMachine vm = vmProfile.getVirtualMachine(); - - // check if zone is dedicated. if yes check if vm owner has acess to it. - DedicatedResourceVO dedicatedZone = _dedicatedDao.findByZoneId(dc.getId()); - if (dedicatedZone != null) { - long accountDomainId = vmProfile.getOwner().getDomainId(); - long accountId = vmProfile.getOwner().getAccountId(); - - // If a zone is dedicated to an account then all hosts in this zone - // will be explicitly dedicated to - // that account. So there won't be any shared hosts in the zone, the - // only way to deploy vms from that - // account will be to use explicit dedication affinity group. - if (dedicatedZone.getAccountId() != null) { - if (dedicatedZone.getAccountId().equals(accountId)) { - return; - } else { - throw new CloudRuntimeException("Failed to deploy VM, Zone " + dc.getName() + " not available for the user account " + vmProfile.getOwner()); - } - } - - // if zone is dedicated to a domain. 
Check owner's access to the - // domain level dedication group - if (!_affinityGroupService.isAffinityGroupAvailableInDomain(dedicatedZone.getAffinityGroupId(), accountDomainId)) { - throw new CloudRuntimeException("Failed to deploy VM, Zone " + dc.getName() + " not available for the user domain " + vmProfile.getOwner()); - } - - } - - // check affinity group of type Explicit dedication exists. If No put - // dedicated pod/cluster/host in avoid list - List vmGroupMappings = _affinityGroupVMMapDao.findByVmIdType(vm.getId(), "ExplicitDedication"); - - if (vmGroupMappings != null && !vmGroupMappings.isEmpty()) { - isExplicit = true; - } - - if (!isExplicit) { - //add explicitly dedicated resources in avoidList - - List allPodsInDc = _podDao.listAllPods(dc.getId()); - List allDedicatedPods = _dedicatedDao.listAllPods(); - allPodsInDc.retainAll(allDedicatedPods); - avoids.addPodList(allPodsInDc); - - List allClustersInDc = _clusterDao.listAllCusters(dc.getId()); - List allDedicatedClusters = _dedicatedDao.listAllClusters(); - allClustersInDc.retainAll(allDedicatedClusters); - avoids.addClusterList(allClustersInDc); - - List allHostsInDc = _hostDao.listAllHosts(dc.getId()); - List allDedicatedHosts = _dedicatedDao.listAllHosts(); - allHostsInDc.retainAll(allDedicatedHosts); - avoids.addHostList(allHostsInDc); - } - } - - private void resetAvoidSet(ExcludeList avoidSet, ExcludeList removeSet) { - if (avoidSet.getDataCentersToAvoid() != null && removeSet.getDataCentersToAvoid() != null) { - avoidSet.getDataCentersToAvoid().removeAll(removeSet.getDataCentersToAvoid()); - } - if (avoidSet.getPodsToAvoid() != null && removeSet.getPodsToAvoid() != null) { - avoidSet.getPodsToAvoid().removeAll(removeSet.getPodsToAvoid()); - } - if (avoidSet.getClustersToAvoid() != null && removeSet.getClustersToAvoid() != null) { - avoidSet.getClustersToAvoid().removeAll(removeSet.getClustersToAvoid()); - } - if (avoidSet.getHostsToAvoid() != null && removeSet.getHostsToAvoid() != null) { - avoidSet.getHostsToAvoid().removeAll(removeSet.getHostsToAvoid()); - } - if (avoidSet.getPoolsToAvoid() != null && removeSet.getPoolsToAvoid() != null) { - avoidSet.getPoolsToAvoid().removeAll(removeSet.getPoolsToAvoid()); - } - } - - private PlannerResourceUsage getPlannerUsage(DeploymentPlanner planner, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids) - throws InsufficientServerCapacityException { - if (planner != null && planner instanceof DeploymentClusterPlanner) { - return ((DeploymentClusterPlanner)planner).getResourceUsage(vmProfile, plan, avoids); - } else { - return DeploymentPlanner.PlannerResourceUsage.Shared; - } - - } - - @DB - private boolean checkIfHostFitsPlannerUsage(final long hostId, final PlannerResourceUsage resourceUsageRequired) { - // TODO Auto-generated method stub - // check if this host has been picked up by some other planner - // exclusively - // if planner can work with shared host, check if this host has - // been marked as 'shared' - // else if planner needs dedicated host, - - PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(hostId); - if (reservationEntry != null) { - final long id = reservationEntry.getId(); - PlannerResourceUsage hostResourceType = reservationEntry.getResourceUsage(); - - if (hostResourceType != null) { - if (hostResourceType == resourceUsageRequired) { - return true; - } else { - s_logger.debug("Cannot use this host for usage: " + resourceUsageRequired + ", since this host has been reserved for planner usage : " + 
hostResourceType); - return false; - } - } else { - final PlannerResourceUsage hostResourceTypeFinal = hostResourceType; - // reserve the host for required resourceType - // let us lock the reservation entry before updating. - return Transaction.execute(new TransactionCallback() { - @Override - public Boolean doInTransaction(TransactionStatus status) { - final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true); - if (lockedEntry == null) { - s_logger.error("Unable to lock the host entry for reservation, host: " + hostId); - return false; - } - // check before updating - if (lockedEntry.getResourceUsage() == null) { - lockedEntry.setResourceUsage(resourceUsageRequired); - _plannerHostReserveDao.persist(lockedEntry); - return true; - } else { - // someone updated it earlier. check if we can still use it - if (lockedEntry.getResourceUsage() == resourceUsageRequired) { - return true; - } else { - s_logger.debug("Cannot use this host for usage: " + resourceUsageRequired + ", since this host has been reserved for planner usage : " + - hostResourceTypeFinal); - return false; - } - } - } - }); - - } - - } - - return false; - } - - @DB - public boolean checkHostReservationRelease(final Long hostId) { - - if (hostId != null) { - PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(hostId); - if (reservationEntry != null && reservationEntry.getResourceUsage() != null) { - - // check if any VMs are starting or running on this host - List vms = _vmInstanceDao.listUpByHostId(hostId); - if (vms.size() > 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot release reservation, Found " + vms.size() + " VMs Running on host " + hostId); - } - return false; - } - - List vmsByLastHostId = _vmInstanceDao.listByLastHostId(hostId); - if (vmsByLastHostId.size() > 0) { - // check if any VMs are within skip.counting.hours, if yes - // we - // cannot release the host - for (VMInstanceVO stoppedVM : vmsByLastHostId) { - long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - stoppedVM.getUpdateTime().getTime()) / 1000; - if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot release reservation, Found VM: " + stoppedVM + " Stopped but reserved on host " + hostId); - } - return false; - } - } - } - - // check if any VMs are stopping on or migrating to this host - List vmsStoppingMigratingByHostId = _vmInstanceDao.findByHostInStates(hostId, State.Stopping, State.Migrating, State.Starting); - if (vmsStoppingMigratingByHostId.size() > 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot release reservation, Found " + vms.size() + " VMs stopping/migrating on host " + hostId); - } - return false; - } - - // check if any VMs are in starting state with no hostId set yet - // - - // just ignore host release to avoid race condition - List vmsStartingNoHost = _vmInstanceDao.listStartingWithNoHostId(); - - if (vmsStartingNoHost.size() > 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot release reservation, Found " + vms.size() + " VMs starting as of now and no hostId yet stored"); - } - return false; - } - - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host has no VMs associated, releasing the planner reservation for host " + hostId); - } - - final long id = reservationEntry.getId(); - - return Transaction.execute(new TransactionCallback() { - @Override - public Boolean doInTransaction(TransactionStatus status) { - final PlannerHostReservationVO 
lockedEntry = _plannerHostReserveDao.lockRow(id, true); - if (lockedEntry == null) { - s_logger.error("Unable to lock the host entry for reservation, host: " + hostId); - return false; - } - // check before updating - if (lockedEntry.getResourceUsage() != null) { - lockedEntry.setResourceUsage(null); - _plannerHostReserveDao.persist(lockedEntry); - return true; - } - - return false; - } - }); - } - - } - return false; - } - - class HostReservationReleaseChecker extends ManagedContextTimerTask { - @Override - protected void runInContext() { - try { - s_logger.debug("Checking if any host reservation can be released ... "); - checkHostReservations(); - s_logger.debug("Done running HostReservationReleaseChecker ... "); - } catch (Throwable t) { - s_logger.error("Exception in HostReservationReleaseChecker", t); - } - } - } - - private void checkHostReservations() { - List reservedHosts = _plannerHostReserveDao.listAllReservedHosts(); - - for (PlannerHostReservationVO hostReservation : reservedHosts) { - HostVO host = _hostDao.findById(hostReservation.getHostId()); - if (host != null && host.getManagementServerId() != null && host.getManagementServerId() == _nodeId) { - checkHostReservationRelease(hostReservation.getHostId()); - } - } - - } - - @Override - public boolean processAnswers(long agentId, long seq, Answer[] answers) { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean processCommands(long agentId, long seq, Command[] commands) { - // TODO Auto-generated method stub - return false; - } - - @Override - public AgentControlAnswer processControlCommand(long agentId, AgentControlCommand cmd) { - // TODO Auto-generated method stub - return null; - } - - @Override - public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException { - if (!(cmd instanceof StartupRoutingCommand)) { - return; - } - - PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(host.getId()); - if (reservationEntry == null) { - // record the host in this table - PlannerHostReservationVO newHost = new PlannerHostReservationVO(host.getId(), host.getDataCenterId(), host.getPodId(), host.getClusterId()); - _plannerHostReserveDao.persist(newHost); - } - - } - - @Override - public boolean processDisconnect(long agentId, Status state) { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean isRecurring() { - // TODO Auto-generated method stub - return false; - } - - @Override - public int getTimeout() { - // TODO Auto-generated method stub - return 0; - } - - @Override - public boolean processTimeout(long agentId, long seq) { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean configure(final String name, final Map params) throws ConfigurationException { - _agentMgr.registerForHostEvents(this, true, false, true); - VirtualMachine.State.getStateMachine().registerListener(this); - _messageBus.subscribe("VM_ReservedCapacity_Free", new MessageSubscriber() { - @Override - public void onPublishMessage(String senderAddress, String subject, Object obj) { - VMInstanceVO vm = ((VMInstanceVO)obj); - s_logger.debug("MessageBus message: host reserved capacity released for VM: " + vm.getLastHostId() + ", checking if host reservation can be released for host:" + - vm.getLastHostId()); - Long hostId = vm.getLastHostId(); - checkHostReservationRelease(hostId); - } - }); - - _vmCapacityReleaseInterval = 
NumbersUtil.parseInt(_configDao.getValue(Config.CapacitySkipcountingHours.key()), 3600); - - String hostReservationReleasePeriod = _configDao.getValue(Config.HostReservationReleasePeriod.key()); - if (hostReservationReleasePeriod != null) { - _hostReservationReleasePeriod = Long.parseLong(hostReservationReleasePeriod); - if (_hostReservationReleasePeriod <= 0) - _hostReservationReleasePeriod = Long.parseLong(Config.HostReservationReleasePeriod.getDefaultValue()); - } - - _timer = new Timer("HostReservationReleaseChecker"); - - _nodeId = ManagementServerNode.getManagementServerId(); - - return super.configure(name, params); - } - - @Override - public boolean start() { - _timer.schedule(new HostReservationReleaseChecker(), INITIAL_RESERVATION_RELEASE_CHECKER_DELAY, _hostReservationReleasePeriod); - cleanupVMReservations(); - return true; - } - - @Override - public boolean stop() { - _timer.cancel(); - return true; - } - - @Override - public void cleanupVMReservations() { - List reservations = _reservationDao.listAll(); - - for (VMReservationVO reserv : reservations) { - VMInstanceVO vm = _vmInstanceDao.findById(reserv.getVmId()); - if (vm != null) { - if (vm.getState() == State.Starting || (vm.getState() == State.Stopped && vm.getLastHostId() == null)) { - continue; - } else { - // delete reservation - _reservationDao.remove(reserv.getId()); - } - } else { - // delete reservation - _reservationDao.remove(reserv.getId()); - } - } - } - - // /refactoring planner methods - private DeployDestination checkClustersforDestination(List clusterList, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, DataCenter dc, - DeploymentPlanner.PlannerResourceUsage resourceUsageRequired, ExcludeList PlannerAvoidOutput) { - - if (s_logger.isTraceEnabled()) { - s_logger.trace("ClusterId List to consider: " + clusterList); - } - - for (Long clusterId : clusterList) { - ClusterVO clusterVO = _clusterDao.findById(clusterId); - - if (clusterVO.getHypervisorType() != vmProfile.getHypervisorType()) { - s_logger.debug("Cluster: " + clusterId + " has HyperVisorType that does not match the VM, skipping this cluster"); - avoid.addCluster(clusterVO.getId()); - continue; - } - - s_logger.debug("Checking resources in Cluster: " + clusterId + " under Pod: " + clusterVO.getPodId()); - // search for resources(hosts and storage) under this zone, pod, - // cluster. - DataCenterDeployment potentialPlan = new DataCenterDeployment(plan.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null, plan.getPoolId(), null, - plan.getReservationContext()); - - // find suitable hosts under this cluster, need as many hosts as we - // get. 
- List suitableHosts = findSuitableHosts(vmProfile, potentialPlan, avoid, HostAllocator.RETURN_UPTO_ALL); - // if found suitable hosts in this cluster, find suitable storage - // pools for each volume of the VM - if (suitableHosts != null && !suitableHosts.isEmpty()) { - if (vmProfile.getHypervisorType() == HypervisorType.BareMetal) { - Pod pod = _podDao.findById(clusterVO.getPodId()); - DeployDestination dest = new DeployDestination(dc, pod, clusterVO, suitableHosts.get(0)); - return dest; - } - - Pair>, List> result = findSuitablePoolsForVolumes(vmProfile, potentialPlan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL); - Map> suitableVolumeStoragePools = result.first(); - List readyAndReusedVolumes = result.second(); - - // choose the potential host and pool for the VM - if (!suitableVolumeStoragePools.isEmpty()) { - Pair> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoid, - resourceUsageRequired); - - if (potentialResources != null) { - Pod pod = _podDao.findById(clusterVO.getPodId()); - Host host = _hostDao.findById(potentialResources.first().getId()); - Map storageVolMap = potentialResources.second(); - // remove the reused vol<->pool from destination, since - // we don't have to prepare this volume. - for (Volume vol : readyAndReusedVolumes) { - storageVolMap.remove(vol); - } - DeployDestination dest = new DeployDestination(dc, pod, clusterVO, host, storageVolMap); - s_logger.debug("Returning Deployment Destination: " + dest); - return dest; - } - } else { - s_logger.debug("No suitable storagePools found under this Cluster: " + clusterId); - } - } else { - s_logger.debug("No suitable hosts found under this Cluster: " + clusterId); - } - - if (canAvoidCluster(clusterVO, avoid, PlannerAvoidOutput, vmProfile)) { - avoid.addCluster(clusterVO.getId()); - } - } - s_logger.debug("Could not find suitable Deployment Destination for this VM under any clusters, returning. "); - return null; - } - - private boolean canAvoidCluster(Cluster clusterVO, ExcludeList avoids, ExcludeList plannerAvoidOutput, VirtualMachineProfile vmProfile) { - - ExcludeList allocatorAvoidOutput = new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), - avoids.getPoolsToAvoid()); - - // remove any hosts/pools that the planners might have added - // to get the list of hosts/pools that Allocators flagged as 'avoid' - - resetAvoidSet(allocatorAvoidOutput, plannerAvoidOutput); - - // if all hosts or all pools in the cluster are in avoid set after this - // pass, then put the cluster in avoid set. - boolean avoidAllHosts = true, avoidAllPools = true; - - List allhostsInCluster = _hostDao.listAllUpAndEnabledNonHAHosts(Host.Type.Routing, clusterVO.getId(), clusterVO.getPodId(), clusterVO.getDataCenterId(), null); - for (HostVO host : allhostsInCluster) { - if (!allocatorAvoidOutput.shouldAvoid(host)) { - // there's some host in the cluster that is not yet in avoid set - avoidAllHosts = false; - break; - } - } - - // all hosts in avoid set, avoid the cluster. Otherwise check the pools - if (avoidAllHosts) { - return true; - } - - // Cluster can be put in avoid set in following scenarios: - // 1. If storage allocators haven't put any pools in avoid set means either no pools in cluster - // or pools not suitable for the allocators to handle or there is no - // linkage of any suitable host to any of the pools in cluster - // 2. 
If all 'shared' or 'local' pools are in avoid set - if (allocatorAvoidOutput.getPoolsToAvoid() != null && !allocatorAvoidOutput.getPoolsToAvoid().isEmpty()) { - - Pair storageRequirements = findVMStorageRequirements(vmProfile); - boolean vmRequiresSharedStorage = storageRequirements.first(); - boolean vmRequiresLocalStorege = storageRequirements.second(); - - if (vmRequiresSharedStorage) { - // check shared pools - List allPoolsInCluster = _storagePoolDao.findPoolsByTags(clusterVO.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null); - for (StoragePoolVO pool : allPoolsInCluster) { - if (!allocatorAvoidOutput.shouldAvoid(pool)) { - // there's some pool in the cluster that is not yet in avoid set - avoidAllPools = false; - break; - } - } - } - - if (vmRequiresLocalStorege) { - // check local pools - List allLocalPoolsInCluster = _storagePoolDao.findLocalStoragePoolsByTags(clusterVO.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null); - for (StoragePoolVO pool : allLocalPoolsInCluster) { - if (!allocatorAvoidOutput.shouldAvoid(pool)) { - // there's some pool in the cluster that is not yet - // in avoid set - avoidAllPools = false; - break; - } - } - } - } - - if (avoidAllHosts || avoidAllPools) { - return true; - } - return false; - } - - private Pair findVMStorageRequirements(VirtualMachineProfile vmProfile) { - - boolean requiresShared = false, requiresLocal = false; - - List volumesTobeCreated = _volsDao.findUsableVolumesForInstance(vmProfile.getId()); - - // for each volume find whether shared or local pool is required - for (VolumeVO toBeCreated : volumesTobeCreated) { - DiskOfferingVO diskOffering = _diskOfferingDao.findById(toBeCreated.getDiskOfferingId()); - - if (diskOffering != null) { - if (diskOffering.getUseLocalStorage()) { - requiresLocal = true; - } else { - requiresShared = true; - } - } - } - - return new Pair(requiresShared, requiresLocal); - } - - protected Pair> findPotentialDeploymentResources(List suitableHosts, Map> suitableVolumeStoragePools, - ExcludeList avoid, DeploymentPlanner.PlannerResourceUsage resourceUsageRequired) { - s_logger.debug("Trying to find a potenial host and associated storage pools from the suitable host/pool lists for this VM"); - - boolean hostCanAccessPool = false; - boolean haveEnoughSpace = false; - Map storage = new HashMap(); - TreeSet volumesOrderBySizeDesc = new TreeSet(new Comparator() { - @Override - public int compare(Volume v1, Volume v2) { - if (v1.getSize() < v2.getSize()) - return 1; - else - return -1; - } - }); - volumesOrderBySizeDesc.addAll(suitableVolumeStoragePools.keySet()); - boolean multipleVolume = volumesOrderBySizeDesc.size() > 1; - for (Host potentialHost : suitableHosts) { - Map> volumeAllocationMap = new HashMap>(); - for (Volume vol : volumesOrderBySizeDesc) { - haveEnoughSpace = false; - s_logger.debug("Checking if host: " + potentialHost.getId() + " can access any suitable storage pool for volume: " + vol.getVolumeType()); - List volumePoolList = suitableVolumeStoragePools.get(vol); - hostCanAccessPool = false; - for (StoragePool potentialSPool : volumePoolList) { - if (hostCanAccessSPool(potentialHost, potentialSPool)) { - hostCanAccessPool = true; - if (multipleVolume) { - List requestVolumes = null; - if (volumeAllocationMap.containsKey(potentialSPool)) - requestVolumes = volumeAllocationMap.get(potentialSPool); - else - requestVolumes = new ArrayList(); - requestVolumes.add(vol); - - if (!_storageMgr.storagePoolHasEnoughSpace(requestVolumes, potentialSPool)) - continue; - 
volumeAllocationMap.put(potentialSPool, requestVolumes); - } - storage.put(vol, potentialSPool); - haveEnoughSpace = true; - break; - } - } - if (!hostCanAccessPool) { - break; - } - if (!haveEnoughSpace) { - s_logger.warn("insufficient capacity to allocate all volumes"); - break; - } - } - if (hostCanAccessPool && haveEnoughSpace && checkIfHostFitsPlannerUsage(potentialHost.getId(), resourceUsageRequired)) { - s_logger.debug("Found a potential host " + "id: " + potentialHost.getId() + " name: " + potentialHost.getName() + " and associated storage pools for this VM"); - return new Pair>(potentialHost, storage); - } else { - avoid.addHost(potentialHost.getId()); - } - } - s_logger.debug("Could not find a potential host that has associated storage pools from the suitable host/pool lists for this VM"); - return null; - } - - protected boolean hostCanAccessSPool(Host host, StoragePool pool) { - boolean hostCanAccessSPool = false; - - StoragePoolHostVO hostPoolLinkage = _poolHostDao.findByPoolHost(pool.getId(), host.getId()); - if (hostPoolLinkage != null) { - hostCanAccessSPool = true; - } - - s_logger.debug("Host: " + host.getId() + (hostCanAccessSPool ? " can" : " cannot") + " access pool: " + pool.getId()); - return hostCanAccessSPool; - } - - protected List findSuitableHosts(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { - List suitableHosts = new ArrayList(); - for (HostAllocator allocator : _hostAllocators) { - suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, avoid, returnUpTo); - if (suitableHosts != null && !suitableHosts.isEmpty()) { - break; - } - } - - if (suitableHosts.isEmpty()) { - s_logger.debug("No suitable hosts found"); - } - return suitableHosts; - } - - protected Pair>, List> - findSuitablePoolsForVolumes(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { - List volumesTobeCreated = _volsDao.findUsableVolumesForInstance(vmProfile.getId()); - Map> suitableVolumeStoragePools = new HashMap>(); - List readyAndReusedVolumes = new ArrayList(); - - // There should be atleast the ROOT volume of the VM in usable state - if (volumesTobeCreated.isEmpty()) { - throw new CloudRuntimeException("Unable to create deployment, no usable volumes found for the VM"); - } - - // for each volume find list of suitable storage pools by calling the - // allocators - for (VolumeVO toBeCreated : volumesTobeCreated) { - s_logger.debug("Checking suitable pools for volume (Id, Type): (" + toBeCreated.getId() + "," + toBeCreated.getVolumeType().name() + ")"); - - // If the plan specifies a poolId, it means that this VM's ROOT - // volume is ready and the pool should be reused. - // In this case, also check if rest of the volumes are ready and can - // be reused. - if (plan.getPoolId() != null) { - s_logger.debug("Volume has pool already allocated, checking if pool can be reused, poolId: " + toBeCreated.getPoolId()); - List suitablePools = new ArrayList(); - StoragePool pool = null; - if (toBeCreated.getPoolId() != null) { - pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(toBeCreated.getPoolId()); - } else { - pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(plan.getPoolId()); - } - - if (!pool.isInMaintenance()) { - if (!avoid.shouldAvoid(pool)) { - long exstPoolDcId = pool.getDataCenterId(); - long exstPoolPodId = pool.getPodId() != null ? pool.getPodId() : -1; - long exstPoolClusterId = pool.getClusterId() != null ? 
pool.getClusterId() : -1; - boolean canReusePool = false; - if (plan.getDataCenterId() == exstPoolDcId && plan.getPodId() == exstPoolPodId && plan.getClusterId() == exstPoolClusterId) { - canReusePool = true; - } else if (plan.getDataCenterId() == exstPoolDcId) { - DataStore dataStore = this.dataStoreMgr.getPrimaryDataStore(pool.getId()); - if (dataStore != null && dataStore.getScope() != null && dataStore.getScope().getScopeType() == ScopeType.ZONE) { - canReusePool = true; - } - } else { - s_logger.debug("Pool of the volume does not fit the specified plan, need to reallocate a pool for this volume"); - canReusePool = false; - } - - if (canReusePool) { - s_logger.debug("Planner need not allocate a pool for this volume since its READY"); - suitablePools.add(pool); - suitableVolumeStoragePools.put(toBeCreated, suitablePools); - if (!(toBeCreated.getState() == Volume.State.Allocated || toBeCreated.getState() == Volume.State.Creating)) { - readyAndReusedVolumes.add(toBeCreated); - } - continue; - } - } else { - s_logger.debug("Pool of the volume is in avoid set, need to reallocate a pool for this volume"); - } - } else { - s_logger.debug("Pool of the volume is in maintenance, need to reallocate a pool for this volume"); - } - } - - if (s_logger.isDebugEnabled()) { - s_logger.debug("We need to allocate new storagepool for this volume"); - } - if (!isRootAdmin(plan.getReservationContext())) { - if (!isEnabledForAllocation(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot allocate new storagepool for this volume in this cluster, allocation state is disabled"); - s_logger.debug("Cannot deploy to this specified plan, allocation state is disabled, returning."); - } - // Cannot find suitable storage pools under this cluster for - // this volume since allocation_state is disabled. - // - remove any suitable pools found for other volumes. - // All volumes should get suitable pools under this cluster; - // else we cant use this cluster. 
- suitableVolumeStoragePools.clear(); - break; - } - } - - s_logger.debug("Calling StoragePoolAllocators to find suitable pools"); - - DiskOfferingVO diskOffering = _diskOfferingDao.findById(toBeCreated.getDiskOfferingId()); - DiskProfile diskProfile = new DiskProfile(toBeCreated, diskOffering, vmProfile.getHypervisorType()); - - boolean useLocalStorage = false; - if (vmProfile.getType() != VirtualMachine.Type.User) { - String ssvmUseLocalStorage = _configDao.getValue(Config.SystemVMUseLocalStorage.key()); - if (ssvmUseLocalStorage.equalsIgnoreCase("true")) { - useLocalStorage = true; - } - } else { - useLocalStorage = diskOffering.getUseLocalStorage(); - - // TODO: this is a hacking fix for the problem of deploy - // ISO-based VM on local storage - // when deploying VM based on ISO, we have a service offering - // and an additional disk offering, use-local storage flag is - // actually - // saved in service offering, overrde the flag from service - // offering when it is a ROOT disk - if (!useLocalStorage && vmProfile.getServiceOffering().getUseLocalStorage()) { - if (toBeCreated.getVolumeType() == Volume.Type.ROOT) - useLocalStorage = true; - } - } - diskProfile.setUseLocalStorage(useLocalStorage); - - boolean foundPotentialPools = false; - for (StoragePoolAllocator allocator : _storagePoolAllocators) { - final List suitablePools = allocator.allocateToPool(diskProfile, vmProfile, plan, avoid, returnUpTo); - if (suitablePools != null && !suitablePools.isEmpty()) { - suitableVolumeStoragePools.put(toBeCreated, suitablePools); - foundPotentialPools = true; - break; - } - } - - if (!foundPotentialPools) { - s_logger.debug("No suitable pools found for volume: " + toBeCreated + " under cluster: " + plan.getClusterId()); - // No suitable storage pools found under this cluster for this - // volume. - remove any suitable pools found for other volumes. - // All volumes should get suitable pools under this cluster; - // else we cant use this cluster. 
- suitableVolumeStoragePools.clear(); - break; - } - } - - if (suitableVolumeStoragePools.isEmpty()) { - s_logger.debug("No suitable pools found"); - } - - return new Pair>, List>(suitableVolumeStoragePools, readyAndReusedVolumes); - } - - private boolean isEnabledForAllocation(long zoneId, Long podId, Long clusterId) { - // Check if the zone exists in the system - DataCenterVO zone = _dcDao.findById(zoneId); - if (zone != null && Grouping.AllocationState.Disabled == zone.getAllocationState()) { - s_logger.info("Zone is currently disabled, cannot allocate to this zone: " + zoneId); - return false; - } - - Pod pod = _podDao.findById(podId); - if (pod != null && Grouping.AllocationState.Disabled == pod.getAllocationState()) { - s_logger.info("Pod is currently disabled, cannot allocate to this pod: " + podId); - return false; - } - - Cluster cluster = _clusterDao.findById(clusterId); - if (cluster != null && Grouping.AllocationState.Disabled == cluster.getAllocationState()) { - s_logger.info("Cluster is currently disabled, cannot allocate to this cluster: " + clusterId); - return false; - } - - return true; - } - - private boolean isRootAdmin(ReservationContext reservationContext) { - if (reservationContext != null) { - if (reservationContext.getAccount() != null) { - return _accountMgr.isRootAdmin(reservationContext.getAccount().getType()); - } else { - return false; - } - } - return false; - } - - @DB - @Override - public String finalizeReservation(final DeployDestination plannedDestination, final VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids) - throws InsufficientServerCapacityException, AffinityConflictException { - - final VirtualMachine vm = vmProfile.getVirtualMachine(); - final long vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId()); - - return Transaction.execute(new TransactionCallback() { - @Override - public String doInTransaction(TransactionStatus status) { - boolean saveReservation = true; - - if (vmGroupCount > 0) { - List groupIds = _affinityGroupVMMapDao.listAffinityGroupIdsByVmId(vm.getId()); - SearchCriteria criteria = _affinityGroupDao.createSearchCriteria(); - criteria.addAnd("id", SearchCriteria.Op.IN, groupIds.toArray(new Object[groupIds.size()])); - List groups = _affinityGroupDao.lockRows(criteria, null, true); - - for (AffinityGroupProcessor processor : _affinityProcessors) { - if (!processor.check(vmProfile, plannedDestination)) { - saveReservation = false; - break; - } - } - } - - if (saveReservation) { - VMReservationVO vmReservation = new VMReservationVO(vm.getId(), plannedDestination.getDataCenter().getId(), plannedDestination.getPod().getId(), - plannedDestination.getCluster().getId(), plannedDestination.getHost().getId()); - Map volumeReservationMap = new HashMap(); - - if (vm.getHypervisorType() != HypervisorType.BareMetal) { - for (Volume vo : plannedDestination.getStorageForDisks().keySet()) { - volumeReservationMap.put(vo.getId(), plannedDestination.getStorageForDisks().get(vo).getId()); - } - vmReservation.setVolumeReservation(volumeReservationMap); - } - _reservationDao.persist(vmReservation); - return vmReservation.getUuid(); - } - - return null; - } - }); - } - - @Override - public boolean preStateTransitionEvent(State oldState, Event event, State newState, VirtualMachine vo, boolean status, Object opaque) { - return true; - } - - @Override - public boolean postStateTransitionEvent(State oldState, Event event, State newState, VirtualMachine vo, boolean status, Object opaque) { - if (!status) { - return 
false; - } - if ((oldState == State.Starting) && (newState != State.Starting)) { - // cleanup all VM reservation entries - SearchCriteria sc = _reservationDao.createSearchCriteria(); - sc.addAnd("vmId", SearchCriteria.Op.EQ, vo.getId()); - _reservationDao.expunge(sc); - } - return true; - } -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.deploy; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Timer; +import java.util.TreeSet; + +import javax.ejb.Local; +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.affinity.AffinityGroupProcessor; +import org.apache.cloudstack.affinity.AffinityGroupService; +import org.apache.cloudstack.affinity.AffinityGroupVMMapVO; +import org.apache.cloudstack.affinity.AffinityGroupVO; +import org.apache.cloudstack.affinity.dao.AffinityGroupDao; +import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; +import org.apache.cloudstack.engine.cloud.entity.api.db.VMReservationVO; +import org.apache.cloudstack.engine.cloud.entity.api.db.dao.VMReservationDao; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.messagebus.MessageBus; +import org.apache.cloudstack.framework.messagebus.MessageSubscriber; +import org.apache.cloudstack.managed.context.ManagedContextTimerTask; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.utils.identity.ManagementServerNode; +import org.apache.log4j.Logger; + +import com.cloud.capacity.CapacityManager; +import com.cloud.capacity.dao.CapacityDao; +import com.cloud.configuration.Config; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.ClusterDetailsVO; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.DataCenter; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.DedicatedResourceVO; +import com.cloud.dc.Pod; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.DedicatedResourceDao; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.deploy.DeploymentPlanner.ExcludeList; +import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage; +import com.cloud.deploy.dao.PlannerHostReservationDao; +import com.cloud.exception.AffinityConflictException; +import com.cloud.exception.ConnectionException; +import 
com.cloud.exception.InsufficientServerCapacityException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.offering.ServiceOffering; +import com.cloud.org.Cluster; +import com.cloud.org.Grouping; +import com.cloud.resource.ResourceState; +import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.ScopeType; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.GuestOSCategoryDao; +import com.cloud.storage.dao.GuestOSDao; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.user.AccountManager; +import com.cloud.utils.DateUtil; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; +import com.cloud.utils.component.Manager; +import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.fsm.StateListener; +import com.cloud.vm.DiskProfile; +import com.cloud.vm.ReservationContext; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachine.Event; +import com.cloud.vm.VirtualMachineProfile; +import com.cloud.vm.VirtualMachine.State; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDao; +import com.cloud.agent.AgentManager; +import com.cloud.agent.Listener; +import com.cloud.agent.api.AgentControlAnswer; +import com.cloud.agent.api.AgentControlCommand; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.Command; +import com.cloud.agent.api.StartupCommand; +import com.cloud.agent.api.StartupRoutingCommand; +import com.cloud.agent.manager.allocator.HostAllocator; + +@Local(value = {DeploymentPlanningManager.class}) +public class DeploymentPlanningManagerImpl extends ManagerBase implements DeploymentPlanningManager, Manager, Listener, StateListener { + + private static final Logger s_logger = Logger.getLogger(DeploymentPlanningManagerImpl.class); + @Inject + AgentManager _agentMgr; + @Inject + protected UserVmDao _vmDao; + @Inject + protected VMInstanceDao _vmInstanceDao; + @Inject + protected AffinityGroupDao _affinityGroupDao; + @Inject + protected AffinityGroupVMMapDao _affinityGroupVMMapDao; + @Inject + AffinityGroupService _affinityGroupService; + @Inject + DataCenterDao _dcDao; + @Inject + PlannerHostReservationDao _plannerHostReserveDao; + private int _vmCapacityReleaseInterval; + @Inject + MessageBus _messageBus; + private Timer _timer = null; + private long _hostReservationReleasePeriod = 60L * 60L * 1000L; // one hour by default + @Inject + protected VMReservationDao _reservationDao; + + private static final long INITIAL_RESERVATION_RELEASE_CHECKER_DELAY = 30L * 1000L; // thirty seconds expressed in milliseconds + protected long _nodeId = -1; + + protected List _storagePoolAllocators; + + public List getStoragePoolAllocators() { + return _storagePoolAllocators; + } + + public void 
setStoragePoolAllocators(List _storagePoolAllocators) { + this._storagePoolAllocators = _storagePoolAllocators; + } + + protected List _hostAllocators; + + public List getHostAllocators() { + return _hostAllocators; + } + + public void setHostAllocators(List _hostAllocators) { + this._hostAllocators = _hostAllocators; + } + + @Inject + protected HostDao _hostDao; + @Inject + protected HostPodDao _podDao; + @Inject + protected ClusterDao _clusterDao; + @Inject + protected DedicatedResourceDao _dedicatedDao; + @Inject + protected GuestOSDao _guestOSDao = null; + @Inject + protected GuestOSCategoryDao _guestOSCategoryDao = null; + @Inject + protected DiskOfferingDao _diskOfferingDao; + @Inject + protected StoragePoolHostDao _poolHostDao; + + @Inject + protected VolumeDao _volsDao; + @Inject + protected CapacityManager _capacityMgr; + @Inject + protected ConfigurationDao _configDao; + @Inject + protected PrimaryDataStoreDao _storagePoolDao; + @Inject + protected CapacityDao _capacityDao; + @Inject + protected AccountManager _accountMgr; + @Inject + protected StorageManager _storageMgr; + @Inject + DataStoreManager dataStoreMgr; + @Inject + protected ClusterDetailsDao _clusterDetailsDao; + + protected List _planners; + + public List getPlanners() { + return _planners; + } + + public void setPlanners(List _planners) { + this._planners = _planners; + } + + protected List _affinityProcessors; + + public List getAffinityGroupProcessors() { + return _affinityProcessors; + } + + public void setAffinityGroupProcessors(List affinityProcessors) { + this._affinityProcessors = affinityProcessors; + } + + @Override + public DeployDestination planDeployment(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids) throws InsufficientServerCapacityException, + AffinityConflictException { + + // call affinitygroup chain + VirtualMachine vm = vmProfile.getVirtualMachine(); + long vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId()); + DataCenter dc = _dcDao.findById(vm.getDataCenterId()); + + if (vmGroupCount > 0) { + for (AffinityGroupProcessor processor : _affinityProcessors) { + processor.process(vmProfile, plan, avoids); + } + } + + if (vm.getType() == VirtualMachine.Type.User) { + checkForNonDedicatedResources(vmProfile, dc, avoids); + } + if (s_logger.isDebugEnabled()) { + s_logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid()); + } + + // call planners + //DataCenter dc = _dcDao.findById(vm.getDataCenterId()); + // check if datacenter is in avoid set + if (avoids.shouldAvoid(dc)) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("DataCenter id = '" + dc.getId() + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning."); + } + return null; + } + + ServiceOffering offering = vmProfile.getServiceOffering(); + String plannerName = offering.getDeploymentPlanner(); + if (plannerName == null) { + if (vm.getHypervisorType() == HypervisorType.BareMetal) { + plannerName = "BareMetalPlanner"; + } else { + plannerName = _configDao.getValue(Config.VmDeploymentPlanner.key()); + } + } + DeploymentPlanner planner = null; + for (DeploymentPlanner plannerInList : _planners) { + if (plannerName.equals(plannerInList.getName())) { + planner = plannerInList; + break; + } + } + + int cpu_requested = offering.getCpu() * offering.getSpeed(); + long ram_requested = offering.getRamSize() * 1024L * 1024L; + + if (s_logger.isDebugEnabled()) { + 
s_logger.debug("DeploymentPlanner allocation algorithm: " + planner); + + s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + plan.getClusterId() + + ", requested cpu: " + cpu_requested + ", requested ram: " + ram_requested); + + s_logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId() != null ? "Yes" : "No")); + } + + String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag); + + if (plan.getHostId() != null && haVmTag == null) { + Long hostIdSpecified = plan.getHostId(); + if (s_logger.isDebugEnabled()) { + s_logger.debug("DeploymentPlan has host_id specified, choosing this host and making no checks on this host: " + hostIdSpecified); + } + HostVO host = _hostDao.findById(hostIdSpecified); + if (host == null) { + s_logger.debug("The specified host cannot be found"); + } else if (avoids.shouldAvoid(host)) { + s_logger.debug("The specified host is in avoid set"); + } else { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Looking for suitable pools for this host under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " + + host.getClusterId()); + } + + // search for storage under the zone, pod, cluster of the host. + DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), hostIdSpecified, plan.getPoolId(), null, + plan.getReservationContext()); + + Pair>, List> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL); + Map> suitableVolumeStoragePools = result.first(); + List readyAndReusedVolumes = result.second(); + + // choose the potential pool for this VM for this host + if (!suitableVolumeStoragePools.isEmpty()) { + List suitableHosts = new ArrayList(); + suitableHosts.add(host); + Pair> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoids, + getPlannerUsage(planner, vmProfile, plan, avoids)); + if (potentialResources != null) { + Pod pod = _podDao.findById(host.getPodId()); + Cluster cluster = _clusterDao.findById(host.getClusterId()); + Map storageVolMap = potentialResources.second(); + // remove the reused vol<->pool from destination, since + // we don't have to prepare this volume. 
+ for (Volume vol : readyAndReusedVolumes) { + storageVolMap.remove(vol); + } + DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap); + s_logger.debug("Returning Deployment Destination: " + dest); + return dest; + } + } + } + s_logger.debug("Cannnot deploy to specified host, returning."); + return null; + } + + if (vm.getLastHostId() != null && haVmTag == null) { + s_logger.debug("This VM has last host_id specified, trying to choose the same host: " + vm.getLastHostId()); + + HostVO host = _hostDao.findById(vm.getLastHostId()); + if (host == null) { + s_logger.debug("The last host of this VM cannot be found"); + } else if (avoids.shouldAvoid(host)) { + s_logger.debug("The last host of this VM is in avoid set"); + } else if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) { + s_logger.debug("The last Host, hostId: " + host.getId() + " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts"); + } else { + if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) { + long cluster_id = host.getClusterId(); + ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio"); + ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio"); + Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); + Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); + if (_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true)) { + s_logger.debug("The last host of this VM is UP and has enough capacity"); + s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId()); + // search for storage under the zone, pod, cluster of + // the last host. + DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null); + Pair>, List> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL); + Map> suitableVolumeStoragePools = result.first(); + List readyAndReusedVolumes = result.second(); + + // choose the potential pool for this VM for this host + if (!suitableVolumeStoragePools.isEmpty()) { + List suitableHosts = new ArrayList(); + suitableHosts.add(host); + Pair> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoids, + getPlannerUsage(planner, vmProfile, plan, avoids)); + if (potentialResources != null) { + Pod pod = _podDao.findById(host.getPodId()); + Cluster cluster = _clusterDao.findById(host.getClusterId()); + Map storageVolMap = potentialResources.second(); + // remove the reused vol<->pool from + // destination, since we don't have to prepare + // this volume. 
+ for (Volume vol : readyAndReusedVolumes) { + storageVolMap.remove(vol); + } + DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap); + s_logger.debug("Returning Deployment Destination: " + dest); + return dest; + } + } + } else { + s_logger.debug("The last host of this VM does not have enough capacity"); + } + } else { + s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + host.getStatus().name() + ", host resource state is: " + + host.getResourceState()); + } + } + s_logger.debug("Cannot choose the last host to deploy this VM "); + } + + DeployDestination dest = null; + List clusterList = null; + + if (planner != null && planner.canHandle(vmProfile, plan, avoids)) { + while (true) { + + if (planner instanceof DeploymentClusterPlanner) { + + ExcludeList plannerAvoidInput = new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), + avoids.getPoolsToAvoid()); + + clusterList = ((DeploymentClusterPlanner)planner).orderClusters(vmProfile, plan, avoids); + + if (clusterList != null && !clusterList.isEmpty()) { + // planner refactoring. call allocators to list hosts + ExcludeList plannerAvoidOutput = new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), + avoids.getHostsToAvoid(), avoids.getPoolsToAvoid()); + + resetAvoidSet(plannerAvoidOutput, plannerAvoidInput); + + dest = checkClustersforDestination(clusterList, vmProfile, plan, avoids, dc, getPlannerUsage(planner, vmProfile, plan, avoids), plannerAvoidOutput); + if (dest != null) { + return dest; + } + // reset the avoid input to the planners + resetAvoidSet(avoids, plannerAvoidOutput); + + } else { + return null; + } + } else { + dest = planner.plan(vmProfile, plan, avoids); + if (dest != null) { + long hostId = dest.getHost().getId(); + avoids.addHost(dest.getHost().getId()); + + if (checkIfHostFitsPlannerUsage(hostId, DeploymentPlanner.PlannerResourceUsage.Shared)) { + // found destination + return dest; + } else { + // find another host - seems some concurrent + // deployment picked it up for dedicated access + continue; + } + } else { + return null; + } + } + } + } + + return dest; + } + + private void checkForNonDedicatedResources(VirtualMachineProfile vmProfile, DataCenter dc, ExcludeList avoids) { + boolean isExplicit = false; + VirtualMachine vm = vmProfile.getVirtualMachine(); + + // check if zone is dedicated. if yes check if vm owner has acess to it. + DedicatedResourceVO dedicatedZone = _dedicatedDao.findByZoneId(dc.getId()); + if (dedicatedZone != null) { + long accountDomainId = vmProfile.getOwner().getDomainId(); + long accountId = vmProfile.getOwner().getAccountId(); + + // If a zone is dedicated to an account then all hosts in this zone + // will be explicitly dedicated to + // that account. So there won't be any shared hosts in the zone, the + // only way to deploy vms from that + // account will be to use explicit dedication affinity group. + if (dedicatedZone.getAccountId() != null) { + if (dedicatedZone.getAccountId().equals(accountId)) { + return; + } else { + throw new CloudRuntimeException("Failed to deploy VM, Zone " + dc.getName() + " not available for the user account " + vmProfile.getOwner()); + } + } + + // if zone is dedicated to a domain. 
Check owner's access to the + // domain level dedication group + if (!_affinityGroupService.isAffinityGroupAvailableInDomain(dedicatedZone.getAffinityGroupId(), accountDomainId)) { + throw new CloudRuntimeException("Failed to deploy VM, Zone " + dc.getName() + " not available for the user domain " + vmProfile.getOwner()); + } + + } + + // check affinity group of type Explicit dedication exists. If No put + // dedicated pod/cluster/host in avoid list + List vmGroupMappings = _affinityGroupVMMapDao.findByVmIdType(vm.getId(), "ExplicitDedication"); + + if (vmGroupMappings != null && !vmGroupMappings.isEmpty()) { + isExplicit = true; + } + + if (!isExplicit) { + //add explicitly dedicated resources in avoidList + + List allPodsInDc = _podDao.listAllPods(dc.getId()); + List allDedicatedPods = _dedicatedDao.listAllPods(); + allPodsInDc.retainAll(allDedicatedPods); + avoids.addPodList(allPodsInDc); + + List allClustersInDc = _clusterDao.listAllCusters(dc.getId()); + List allDedicatedClusters = _dedicatedDao.listAllClusters(); + allClustersInDc.retainAll(allDedicatedClusters); + avoids.addClusterList(allClustersInDc); + + List allHostsInDc = _hostDao.listAllHosts(dc.getId()); + List allDedicatedHosts = _dedicatedDao.listAllHosts(); + allHostsInDc.retainAll(allDedicatedHosts); + avoids.addHostList(allHostsInDc); + } + } + + private void resetAvoidSet(ExcludeList avoidSet, ExcludeList removeSet) { + if (avoidSet.getDataCentersToAvoid() != null && removeSet.getDataCentersToAvoid() != null) { + avoidSet.getDataCentersToAvoid().removeAll(removeSet.getDataCentersToAvoid()); + } + if (avoidSet.getPodsToAvoid() != null && removeSet.getPodsToAvoid() != null) { + avoidSet.getPodsToAvoid().removeAll(removeSet.getPodsToAvoid()); + } + if (avoidSet.getClustersToAvoid() != null && removeSet.getClustersToAvoid() != null) { + avoidSet.getClustersToAvoid().removeAll(removeSet.getClustersToAvoid()); + } + if (avoidSet.getHostsToAvoid() != null && removeSet.getHostsToAvoid() != null) { + avoidSet.getHostsToAvoid().removeAll(removeSet.getHostsToAvoid()); + } + if (avoidSet.getPoolsToAvoid() != null && removeSet.getPoolsToAvoid() != null) { + avoidSet.getPoolsToAvoid().removeAll(removeSet.getPoolsToAvoid()); + } + } + + private PlannerResourceUsage getPlannerUsage(DeploymentPlanner planner, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids) + throws InsufficientServerCapacityException { + if (planner != null && planner instanceof DeploymentClusterPlanner) { + return ((DeploymentClusterPlanner)planner).getResourceUsage(vmProfile, plan, avoids); + } else { + return DeploymentPlanner.PlannerResourceUsage.Shared; + } + + } + + @DB + private boolean checkIfHostFitsPlannerUsage(final long hostId, final PlannerResourceUsage resourceUsageRequired) { + // TODO Auto-generated method stub + // check if this host has been picked up by some other planner + // exclusively + // if planner can work with shared host, check if this host has + // been marked as 'shared' + // else if planner needs dedicated host, + + PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(hostId); + if (reservationEntry != null) { + final long id = reservationEntry.getId(); + PlannerResourceUsage hostResourceType = reservationEntry.getResourceUsage(); + + if (hostResourceType != null) { + if (hostResourceType == resourceUsageRequired) { + return true; + } else { + s_logger.debug("Cannot use this host for usage: " + resourceUsageRequired + ", since this host has been reserved for planner usage : " + 
hostResourceType); + return false; + } + } else { + final PlannerResourceUsage hostResourceTypeFinal = hostResourceType; + // reserve the host for required resourceType + // let us lock the reservation entry before updating. + return Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { + final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true); + if (lockedEntry == null) { + s_logger.error("Unable to lock the host entry for reservation, host: " + hostId); + return false; + } + // check before updating + if (lockedEntry.getResourceUsage() == null) { + lockedEntry.setResourceUsage(resourceUsageRequired); + _plannerHostReserveDao.persist(lockedEntry); + return true; + } else { + // someone updated it earlier. check if we can still use it + if (lockedEntry.getResourceUsage() == resourceUsageRequired) { + return true; + } else { + s_logger.debug("Cannot use this host for usage: " + resourceUsageRequired + ", since this host has been reserved for planner usage : " + + hostResourceTypeFinal); + return false; + } + } + } + }); + + } + + } + + return false; + } + + @DB + public boolean checkHostReservationRelease(final Long hostId) { + + if (hostId != null) { + PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(hostId); + if (reservationEntry != null && reservationEntry.getResourceUsage() != null) { + + // check if any VMs are starting or running on this host + List vms = _vmInstanceDao.listUpByHostId(hostId); + if (vms.size() > 0) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Cannot release reservation, Found " + vms.size() + " VMs Running on host " + hostId); + } + return false; + } + + List vmsByLastHostId = _vmInstanceDao.listByLastHostId(hostId); + if (vmsByLastHostId.size() > 0) { + // check if any VMs are within skip.counting.hours, if yes + // we + // cannot release the host + for (VMInstanceVO stoppedVM : vmsByLastHostId) { + long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - stoppedVM.getUpdateTime().getTime()) / 1000; + if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Cannot release reservation, Found VM: " + stoppedVM + " Stopped but reserved on host " + hostId); + } + return false; + } + } + } + + // check if any VMs are stopping on or migrating to this host + List vmsStoppingMigratingByHostId = _vmInstanceDao.findByHostInStates(hostId, State.Stopping, State.Migrating, State.Starting); + if (vmsStoppingMigratingByHostId.size() > 0) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Cannot release reservation, Found " + vms.size() + " VMs stopping/migrating on host " + hostId); + } + return false; + } + + // check if any VMs are in starting state with no hostId set yet + // - + // just ignore host release to avoid race condition + List vmsStartingNoHost = _vmInstanceDao.listStartingWithNoHostId(); + + if (vmsStartingNoHost.size() > 0) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Cannot release reservation, Found " + vms.size() + " VMs starting as of now and no hostId yet stored"); + } + return false; + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Host has no VMs associated, releasing the planner reservation for host " + hostId); + } + + final long id = reservationEntry.getId(); + + return Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { + final PlannerHostReservationVO 
lockedEntry = _plannerHostReserveDao.lockRow(id, true); + if (lockedEntry == null) { + s_logger.error("Unable to lock the host entry for reservation, host: " + hostId); + return false; + } + // check before updating + if (lockedEntry.getResourceUsage() != null) { + lockedEntry.setResourceUsage(null); + _plannerHostReserveDao.persist(lockedEntry); + return true; + } + + return false; + } + }); + } + + } + return false; + } + + class HostReservationReleaseChecker extends ManagedContextTimerTask { + @Override + protected void runInContext() { + try { + s_logger.debug("Checking if any host reservation can be released ... "); + checkHostReservations(); + s_logger.debug("Done running HostReservationReleaseChecker ... "); + } catch (Throwable t) { + s_logger.error("Exception in HostReservationReleaseChecker", t); + } + } + } + + private void checkHostReservations() { + List reservedHosts = _plannerHostReserveDao.listAllReservedHosts(); + + for (PlannerHostReservationVO hostReservation : reservedHosts) { + HostVO host = _hostDao.findById(hostReservation.getHostId()); + if (host != null && host.getManagementServerId() != null && host.getManagementServerId() == _nodeId) { + checkHostReservationRelease(hostReservation.getHostId()); + } + } + + } + + @Override + public boolean processAnswers(long agentId, long seq, Answer[] answers) { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean processCommands(long agentId, long seq, Command[] commands) { + // TODO Auto-generated method stub + return false; + } + + @Override + public AgentControlAnswer processControlCommand(long agentId, AgentControlCommand cmd) { + // TODO Auto-generated method stub + return null; + } + + @Override + public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException { + if (!(cmd instanceof StartupRoutingCommand)) { + return; + } + + PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(host.getId()); + if (reservationEntry == null) { + // record the host in this table + PlannerHostReservationVO newHost = new PlannerHostReservationVO(host.getId(), host.getDataCenterId(), host.getPodId(), host.getClusterId()); + _plannerHostReserveDao.persist(newHost); + } + + } + + @Override + public boolean processDisconnect(long agentId, Status state) { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean isRecurring() { + // TODO Auto-generated method stub + return false; + } + + @Override + public int getTimeout() { + // TODO Auto-generated method stub + return 0; + } + + @Override + public boolean processTimeout(long agentId, long seq) { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean configure(final String name, final Map params) throws ConfigurationException { + _agentMgr.registerForHostEvents(this, true, false, true); + VirtualMachine.State.getStateMachine().registerListener(this); + _messageBus.subscribe("VM_ReservedCapacity_Free", new MessageSubscriber() { + @Override + public void onPublishMessage(String senderAddress, String subject, Object obj) { + VMInstanceVO vm = ((VMInstanceVO)obj); + s_logger.debug("MessageBus message: host reserved capacity released for VM: " + vm.getLastHostId() + ", checking if host reservation can be released for host:" + + vm.getLastHostId()); + Long hostId = vm.getLastHostId(); + checkHostReservationRelease(hostId); + } + }); + + _vmCapacityReleaseInterval = 
NumbersUtil.parseInt(_configDao.getValue(Config.CapacitySkipcountingHours.key()), 3600); + + String hostReservationReleasePeriod = _configDao.getValue(Config.HostReservationReleasePeriod.key()); + if (hostReservationReleasePeriod != null) { + _hostReservationReleasePeriod = Long.parseLong(hostReservationReleasePeriod); + if (_hostReservationReleasePeriod <= 0) + _hostReservationReleasePeriod = Long.parseLong(Config.HostReservationReleasePeriod.getDefaultValue()); + } + + _timer = new Timer("HostReservationReleaseChecker"); + + _nodeId = ManagementServerNode.getManagementServerId(); + + return super.configure(name, params); + } + + @Override + public boolean start() { + _timer.schedule(new HostReservationReleaseChecker(), INITIAL_RESERVATION_RELEASE_CHECKER_DELAY, _hostReservationReleasePeriod); + cleanupVMReservations(); + return true; + } + + @Override + public boolean stop() { + _timer.cancel(); + return true; + } + + @Override + public void cleanupVMReservations() { + List reservations = _reservationDao.listAll(); + + for (VMReservationVO reserv : reservations) { + VMInstanceVO vm = _vmInstanceDao.findById(reserv.getVmId()); + if (vm != null) { + if (vm.getState() == State.Starting || (vm.getState() == State.Stopped && vm.getLastHostId() == null)) { + continue; + } else { + // delete reservation + _reservationDao.remove(reserv.getId()); + } + } else { + // delete reservation + _reservationDao.remove(reserv.getId()); + } + } + } + + // /refactoring planner methods + private DeployDestination checkClustersforDestination(List clusterList, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, DataCenter dc, + DeploymentPlanner.PlannerResourceUsage resourceUsageRequired, ExcludeList PlannerAvoidOutput) { + + if (s_logger.isTraceEnabled()) { + s_logger.trace("ClusterId List to consider: " + clusterList); + } + + for (Long clusterId : clusterList) { + ClusterVO clusterVO = _clusterDao.findById(clusterId); + + if (clusterVO.getHypervisorType() != vmProfile.getHypervisorType()) { + s_logger.debug("Cluster: " + clusterId + " has HyperVisorType that does not match the VM, skipping this cluster"); + avoid.addCluster(clusterVO.getId()); + continue; + } + + s_logger.debug("Checking resources in Cluster: " + clusterId + " under Pod: " + clusterVO.getPodId()); + // search for resources(hosts and storage) under this zone, pod, + // cluster. + DataCenterDeployment potentialPlan = new DataCenterDeployment(plan.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null, plan.getPoolId(), null, + plan.getReservationContext()); + + // find suitable hosts under this cluster, need as many hosts as we + // get. 
+ List suitableHosts = findSuitableHosts(vmProfile, potentialPlan, avoid, HostAllocator.RETURN_UPTO_ALL); + // if found suitable hosts in this cluster, find suitable storage + // pools for each volume of the VM + if (suitableHosts != null && !suitableHosts.isEmpty()) { + if (vmProfile.getHypervisorType() == HypervisorType.BareMetal) { + Pod pod = _podDao.findById(clusterVO.getPodId()); + DeployDestination dest = new DeployDestination(dc, pod, clusterVO, suitableHosts.get(0)); + return dest; + } + + Pair>, List> result = findSuitablePoolsForVolumes(vmProfile, potentialPlan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL); + Map> suitableVolumeStoragePools = result.first(); + List readyAndReusedVolumes = result.second(); + + // choose the potential host and pool for the VM + if (!suitableVolumeStoragePools.isEmpty()) { + Pair> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoid, + resourceUsageRequired); + + if (potentialResources != null) { + Pod pod = _podDao.findById(clusterVO.getPodId()); + Host host = _hostDao.findById(potentialResources.first().getId()); + Map storageVolMap = potentialResources.second(); + // remove the reused vol<->pool from destination, since + // we don't have to prepare this volume. + for (Volume vol : readyAndReusedVolumes) { + storageVolMap.remove(vol); + } + DeployDestination dest = new DeployDestination(dc, pod, clusterVO, host, storageVolMap); + s_logger.debug("Returning Deployment Destination: " + dest); + return dest; + } + } else { + s_logger.debug("No suitable storagePools found under this Cluster: " + clusterId); + } + } else { + s_logger.debug("No suitable hosts found under this Cluster: " + clusterId); + } + + if (canAvoidCluster(clusterVO, avoid, PlannerAvoidOutput, vmProfile)) { + avoid.addCluster(clusterVO.getId()); + } + } + s_logger.debug("Could not find suitable Deployment Destination for this VM under any clusters, returning. "); + return null; + } + + private boolean canAvoidCluster(Cluster clusterVO, ExcludeList avoids, ExcludeList plannerAvoidOutput, VirtualMachineProfile vmProfile) { + + ExcludeList allocatorAvoidOutput = new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), + avoids.getPoolsToAvoid()); + + // remove any hosts/pools that the planners might have added + // to get the list of hosts/pools that Allocators flagged as 'avoid' + + resetAvoidSet(allocatorAvoidOutput, plannerAvoidOutput); + + // if all hosts or all pools in the cluster are in avoid set after this + // pass, then put the cluster in avoid set. + boolean avoidAllHosts = true, avoidAllPools = true; + + List allhostsInCluster = _hostDao.listAllUpAndEnabledNonHAHosts(Host.Type.Routing, clusterVO.getId(), clusterVO.getPodId(), clusterVO.getDataCenterId(), null); + for (HostVO host : allhostsInCluster) { + if (!allocatorAvoidOutput.shouldAvoid(host)) { + // there's some host in the cluster that is not yet in avoid set + avoidAllHosts = false; + break; + } + } + + // all hosts in avoid set, avoid the cluster. Otherwise check the pools + if (avoidAllHosts) { + return true; + } + + // Cluster can be put in avoid set in following scenarios: + // 1. If storage allocators haven't put any pools in avoid set means either no pools in cluster + // or pools not suitable for the allocators to handle or there is no + // linkage of any suitable host to any of the pools in cluster + // 2. 
If all 'shared' or 'local' pools are in avoid set + if (allocatorAvoidOutput.getPoolsToAvoid() != null && !allocatorAvoidOutput.getPoolsToAvoid().isEmpty()) { + + Pair storageRequirements = findVMStorageRequirements(vmProfile); + boolean vmRequiresSharedStorage = storageRequirements.first(); + boolean vmRequiresLocalStorege = storageRequirements.second(); + + if (vmRequiresSharedStorage) { + // check shared pools + List allPoolsInCluster = _storagePoolDao.findPoolsByTags(clusterVO.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null); + for (StoragePoolVO pool : allPoolsInCluster) { + if (!allocatorAvoidOutput.shouldAvoid(pool)) { + // there's some pool in the cluster that is not yet in avoid set + avoidAllPools = false; + break; + } + } + } + + if (vmRequiresLocalStorege) { + // check local pools + List allLocalPoolsInCluster = _storagePoolDao.findLocalStoragePoolsByTags(clusterVO.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null); + for (StoragePoolVO pool : allLocalPoolsInCluster) { + if (!allocatorAvoidOutput.shouldAvoid(pool)) { + // there's some pool in the cluster that is not yet + // in avoid set + avoidAllPools = false; + break; + } + } + } + } + + if (avoidAllHosts || avoidAllPools) { + return true; + } + return false; + } + + private Pair findVMStorageRequirements(VirtualMachineProfile vmProfile) { + + boolean requiresShared = false, requiresLocal = false; + + List volumesTobeCreated = _volsDao.findUsableVolumesForInstance(vmProfile.getId()); + + // for each volume find whether shared or local pool is required + for (VolumeVO toBeCreated : volumesTobeCreated) { + DiskOfferingVO diskOffering = _diskOfferingDao.findById(toBeCreated.getDiskOfferingId()); + + if (diskOffering != null) { + if (diskOffering.getUseLocalStorage()) { + requiresLocal = true; + } else { + requiresShared = true; + } + } + } + + return new Pair(requiresShared, requiresLocal); + } + + protected Pair> findPotentialDeploymentResources(List suitableHosts, Map> suitableVolumeStoragePools, + ExcludeList avoid, DeploymentPlanner.PlannerResourceUsage resourceUsageRequired) { + s_logger.debug("Trying to find a potenial host and associated storage pools from the suitable host/pool lists for this VM"); + + boolean hostCanAccessPool = false; + boolean haveEnoughSpace = false; + Map storage = new HashMap(); + TreeSet volumesOrderBySizeDesc = new TreeSet(new Comparator() { + @Override + public int compare(Volume v1, Volume v2) { + if (v1.getSize() < v2.getSize()) + return 1; + else + return -1; + } + }); + volumesOrderBySizeDesc.addAll(suitableVolumeStoragePools.keySet()); + boolean multipleVolume = volumesOrderBySizeDesc.size() > 1; + for (Host potentialHost : suitableHosts) { + Map> volumeAllocationMap = new HashMap>(); + for (Volume vol : volumesOrderBySizeDesc) { + haveEnoughSpace = false; + s_logger.debug("Checking if host: " + potentialHost.getId() + " can access any suitable storage pool for volume: " + vol.getVolumeType()); + List volumePoolList = suitableVolumeStoragePools.get(vol); + hostCanAccessPool = false; + for (StoragePool potentialSPool : volumePoolList) { + if (hostCanAccessSPool(potentialHost, potentialSPool)) { + hostCanAccessPool = true; + if (multipleVolume) { + List requestVolumes = null; + if (volumeAllocationMap.containsKey(potentialSPool)) + requestVolumes = volumeAllocationMap.get(potentialSPool); + else + requestVolumes = new ArrayList(); + requestVolumes.add(vol); + + if (!_storageMgr.storagePoolHasEnoughSpace(requestVolumes, potentialSPool)) + continue; + 
volumeAllocationMap.put(potentialSPool, requestVolumes); + } + storage.put(vol, potentialSPool); + haveEnoughSpace = true; + break; + } + } + if (!hostCanAccessPool) { + break; + } + if (!haveEnoughSpace) { + s_logger.warn("insufficient capacity to allocate all volumes"); + break; + } + } + if (hostCanAccessPool && haveEnoughSpace && checkIfHostFitsPlannerUsage(potentialHost.getId(), resourceUsageRequired)) { + s_logger.debug("Found a potential host " + "id: " + potentialHost.getId() + " name: " + potentialHost.getName() + " and associated storage pools for this VM"); + return new Pair>(potentialHost, storage); + } else { + avoid.addHost(potentialHost.getId()); + } + } + s_logger.debug("Could not find a potential host that has associated storage pools from the suitable host/pool lists for this VM"); + return null; + } + + protected boolean hostCanAccessSPool(Host host, StoragePool pool) { + boolean hostCanAccessSPool = false; + + StoragePoolHostVO hostPoolLinkage = _poolHostDao.findByPoolHost(pool.getId(), host.getId()); + if (hostPoolLinkage != null) { + hostCanAccessSPool = true; + } + + s_logger.debug("Host: " + host.getId() + (hostCanAccessSPool ? " can" : " cannot") + " access pool: " + pool.getId()); + return hostCanAccessSPool; + } + + protected List findSuitableHosts(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { + List suitableHosts = new ArrayList(); + for (HostAllocator allocator : _hostAllocators) { + suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, avoid, returnUpTo); + if (suitableHosts != null && !suitableHosts.isEmpty()) { + break; + } + } + + if (suitableHosts.isEmpty()) { + s_logger.debug("No suitable hosts found"); + } + return suitableHosts; + } + + protected Pair>, List> + findSuitablePoolsForVolumes(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { + List volumesTobeCreated = _volsDao.findUsableVolumesForInstance(vmProfile.getId()); + Map> suitableVolumeStoragePools = new HashMap>(); + List readyAndReusedVolumes = new ArrayList(); + + // There should be atleast the ROOT volume of the VM in usable state + if (volumesTobeCreated.isEmpty()) { + throw new CloudRuntimeException("Unable to create deployment, no usable volumes found for the VM"); + } + + // for each volume find list of suitable storage pools by calling the + // allocators + for (VolumeVO toBeCreated : volumesTobeCreated) { + s_logger.debug("Checking suitable pools for volume (Id, Type): (" + toBeCreated.getId() + "," + toBeCreated.getVolumeType().name() + ")"); + + // If the plan specifies a poolId, it means that this VM's ROOT + // volume is ready and the pool should be reused. + // In this case, also check if rest of the volumes are ready and can + // be reused. + if (plan.getPoolId() != null) { + s_logger.debug("Volume has pool already allocated, checking if pool can be reused, poolId: " + toBeCreated.getPoolId()); + List suitablePools = new ArrayList(); + StoragePool pool = null; + if (toBeCreated.getPoolId() != null) { + pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(toBeCreated.getPoolId()); + } else { + pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(plan.getPoolId()); + } + + if (!pool.isInMaintenance()) { + if (!avoid.shouldAvoid(pool)) { + long exstPoolDcId = pool.getDataCenterId(); + long exstPoolPodId = pool.getPodId() != null ? pool.getPodId() : -1; + long exstPoolClusterId = pool.getClusterId() != null ? 
pool.getClusterId() : -1; + boolean canReusePool = false; + if (plan.getDataCenterId() == exstPoolDcId && plan.getPodId() == exstPoolPodId && plan.getClusterId() == exstPoolClusterId) { + canReusePool = true; + } else if (plan.getDataCenterId() == exstPoolDcId) { + DataStore dataStore = this.dataStoreMgr.getPrimaryDataStore(pool.getId()); + if (dataStore != null && dataStore.getScope() != null && dataStore.getScope().getScopeType() == ScopeType.ZONE) { + canReusePool = true; + } + } else { + s_logger.debug("Pool of the volume does not fit the specified plan, need to reallocate a pool for this volume"); + canReusePool = false; + } + + if (canReusePool) { + s_logger.debug("Planner need not allocate a pool for this volume since its READY"); + suitablePools.add(pool); + suitableVolumeStoragePools.put(toBeCreated, suitablePools); + if (!(toBeCreated.getState() == Volume.State.Allocated || toBeCreated.getState() == Volume.State.Creating)) { + readyAndReusedVolumes.add(toBeCreated); + } + continue; + } + } else { + s_logger.debug("Pool of the volume is in avoid set, need to reallocate a pool for this volume"); + } + } else { + s_logger.debug("Pool of the volume is in maintenance, need to reallocate a pool for this volume"); + } + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("We need to allocate new storagepool for this volume"); + } + if (!isRootAdmin(plan.getReservationContext())) { + if (!isEnabledForAllocation(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId())) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Cannot allocate new storagepool for this volume in this cluster, allocation state is disabled"); + s_logger.debug("Cannot deploy to this specified plan, allocation state is disabled, returning."); + } + // Cannot find suitable storage pools under this cluster for + // this volume since allocation_state is disabled. + // - remove any suitable pools found for other volumes. + // All volumes should get suitable pools under this cluster; + // else we cant use this cluster. 
+ suitableVolumeStoragePools.clear(); + break; + } + } + + s_logger.debug("Calling StoragePoolAllocators to find suitable pools"); + + DiskOfferingVO diskOffering = _diskOfferingDao.findById(toBeCreated.getDiskOfferingId()); + DiskProfile diskProfile = new DiskProfile(toBeCreated, diskOffering, vmProfile.getHypervisorType()); + + boolean useLocalStorage = false; + if (vmProfile.getType() != VirtualMachine.Type.User) { + String ssvmUseLocalStorage = _configDao.getValue(Config.SystemVMUseLocalStorage.key()); + if (ssvmUseLocalStorage.equalsIgnoreCase("true")) { + useLocalStorage = true; + } + } else { + useLocalStorage = diskOffering.getUseLocalStorage(); + + // TODO: this is a hacking fix for the problem of deploy + // ISO-based VM on local storage + // when deploying VM based on ISO, we have a service offering + // and an additional disk offering, use-local storage flag is + // actually + // saved in service offering, overrde the flag from service + // offering when it is a ROOT disk + if (!useLocalStorage && vmProfile.getServiceOffering().getUseLocalStorage()) { + if (toBeCreated.getVolumeType() == Volume.Type.ROOT) + useLocalStorage = true; + } + } + diskProfile.setUseLocalStorage(useLocalStorage); + + boolean foundPotentialPools = false; + for (StoragePoolAllocator allocator : _storagePoolAllocators) { + final List suitablePools = allocator.allocateToPool(diskProfile, vmProfile, plan, avoid, returnUpTo); + if (suitablePools != null && !suitablePools.isEmpty()) { + suitableVolumeStoragePools.put(toBeCreated, suitablePools); + foundPotentialPools = true; + break; + } + } + + if (!foundPotentialPools) { + s_logger.debug("No suitable pools found for volume: " + toBeCreated + " under cluster: " + plan.getClusterId()); + // No suitable storage pools found under this cluster for this + // volume. - remove any suitable pools found for other volumes. + // All volumes should get suitable pools under this cluster; + // else we cant use this cluster. 
+ suitableVolumeStoragePools.clear(); + break; + } + } + + if (suitableVolumeStoragePools.isEmpty()) { + s_logger.debug("No suitable pools found"); + } + + return new Pair>, List>(suitableVolumeStoragePools, readyAndReusedVolumes); + } + + private boolean isEnabledForAllocation(long zoneId, Long podId, Long clusterId) { + // Check if the zone exists in the system + DataCenterVO zone = _dcDao.findById(zoneId); + if (zone != null && Grouping.AllocationState.Disabled == zone.getAllocationState()) { + s_logger.info("Zone is currently disabled, cannot allocate to this zone: " + zoneId); + return false; + } + + Pod pod = _podDao.findById(podId); + if (pod != null && Grouping.AllocationState.Disabled == pod.getAllocationState()) { + s_logger.info("Pod is currently disabled, cannot allocate to this pod: " + podId); + return false; + } + + Cluster cluster = _clusterDao.findById(clusterId); + if (cluster != null && Grouping.AllocationState.Disabled == cluster.getAllocationState()) { + s_logger.info("Cluster is currently disabled, cannot allocate to this cluster: " + clusterId); + return false; + } + + return true; + } + + private boolean isRootAdmin(ReservationContext reservationContext) { + if (reservationContext != null) { + if (reservationContext.getAccount() != null) { + return _accountMgr.isRootAdmin(reservationContext.getAccount().getType()); + } else { + return false; + } + } + return false; + } + + @DB + @Override + public String finalizeReservation(final DeployDestination plannedDestination, final VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids) + throws InsufficientServerCapacityException, AffinityConflictException { + + final VirtualMachine vm = vmProfile.getVirtualMachine(); + final long vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId()); + + return Transaction.execute(new TransactionCallback() { + @Override + public String doInTransaction(TransactionStatus status) { + boolean saveReservation = true; + + if (vmGroupCount > 0) { + List groupIds = _affinityGroupVMMapDao.listAffinityGroupIdsByVmId(vm.getId()); + SearchCriteria criteria = _affinityGroupDao.createSearchCriteria(); + criteria.addAnd("id", SearchCriteria.Op.IN, groupIds.toArray(new Object[groupIds.size()])); + List groups = _affinityGroupDao.lockRows(criteria, null, true); + + for (AffinityGroupProcessor processor : _affinityProcessors) { + if (!processor.check(vmProfile, plannedDestination)) { + saveReservation = false; + break; + } + } + } + + if (saveReservation) { + VMReservationVO vmReservation = new VMReservationVO(vm.getId(), plannedDestination.getDataCenter().getId(), plannedDestination.getPod().getId(), + plannedDestination.getCluster().getId(), plannedDestination.getHost().getId()); + Map volumeReservationMap = new HashMap(); + + if (vm.getHypervisorType() != HypervisorType.BareMetal) { + for (Volume vo : plannedDestination.getStorageForDisks().keySet()) { + volumeReservationMap.put(vo.getId(), plannedDestination.getStorageForDisks().get(vo).getId()); + } + vmReservation.setVolumeReservation(volumeReservationMap); + } + _reservationDao.persist(vmReservation); + return vmReservation.getUuid(); + } + + return null; + } + }); + } + + @Override + public boolean preStateTransitionEvent(State oldState, Event event, State newState, VirtualMachine vo, boolean status, Object opaque) { + return true; + } + + @Override + public boolean postStateTransitionEvent(State oldState, Event event, State newState, VirtualMachine vo, boolean status, Object opaque) { + if (!status) { + return 
false; + } + if ((oldState == State.Starting) && (newState != State.Starting)) { + // cleanup all VM reservation entries + SearchCriteria sc = _reservationDao.createSearchCriteria(); + sc.addAnd("vmId", SearchCriteria.Op.EQ, vo.getId()); + _reservationDao.expunge(sc); + } + return true; + } +} diff --git a/server/src/com/cloud/deploy/dao/PlannerHostReservationDao.java b/server/src/com/cloud/deploy/dao/PlannerHostReservationDao.java index b8fb77b940a..dfcf687a31c 100644 --- a/server/src/com/cloud/deploy/dao/PlannerHostReservationDao.java +++ b/server/src/com/cloud/deploy/dao/PlannerHostReservationDao.java @@ -1,32 +1,32 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.deploy.dao; - -import java.util.List; - -import com.cloud.deploy.PlannerHostReservationVO; -import com.cloud.utils.db.GenericDao; - -public interface PlannerHostReservationDao extends GenericDao { - - PlannerHostReservationVO findByHostId(long hostId); - - List listAllReservedHosts(); - - List listAllDedicatedHosts(); - -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.deploy.dao; + +import java.util.List; + +import com.cloud.deploy.PlannerHostReservationVO; +import com.cloud.utils.db.GenericDao; + +public interface PlannerHostReservationDao extends GenericDao { + + PlannerHostReservationVO findByHostId(long hostId); + + List listAllReservedHosts(); + + List listAllDedicatedHosts(); + +} diff --git a/server/src/com/cloud/deploy/dao/PlannerHostReservationDaoImpl.java b/server/src/com/cloud/deploy/dao/PlannerHostReservationDaoImpl.java index 5ee753036ab..074c621ed03 100644 --- a/server/src/com/cloud/deploy/dao/PlannerHostReservationDaoImpl.java +++ b/server/src/com/cloud/deploy/dao/PlannerHostReservationDaoImpl.java @@ -1,75 +1,75 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. 
The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.deploy.dao; - -import java.util.List; - -import javax.annotation.PostConstruct; -import javax.ejb.Local; - -import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage; -import com.cloud.deploy.PlannerHostReservationVO; -import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.SearchBuilder; -import com.cloud.utils.db.SearchCriteria; - -@Local(value = {PlannerHostReservationDao.class}) -public class PlannerHostReservationDaoImpl extends GenericDaoBase implements PlannerHostReservationDao { - - private SearchBuilder _hostIdSearch; - private SearchBuilder _reservedHostSearch; - private SearchBuilder _dedicatedHostSearch;; - - public PlannerHostReservationDaoImpl() { - - } - - @PostConstruct - protected void init() { - _hostIdSearch = createSearchBuilder(); - _hostIdSearch.and("hostId", _hostIdSearch.entity().getHostId(), SearchCriteria.Op.EQ); - _hostIdSearch.done(); - - _reservedHostSearch = createSearchBuilder(); - _reservedHostSearch.and("usage", _reservedHostSearch.entity().getResourceUsage(), SearchCriteria.Op.NNULL); - _reservedHostSearch.done(); - - _dedicatedHostSearch = createSearchBuilder(); - _dedicatedHostSearch.and("usage", _dedicatedHostSearch.entity().getResourceUsage(), SearchCriteria.Op.EQ); - _dedicatedHostSearch.done(); - } - - @Override - public PlannerHostReservationVO findByHostId(long hostId) { - SearchCriteria sc = _hostIdSearch.create(); - sc.setParameters("hostId", hostId); - return findOneBy(sc); - } - - @Override - public List listAllReservedHosts() { - SearchCriteria sc = _reservedHostSearch.create(); - return listBy(sc); - } - - @Override - public List listAllDedicatedHosts() { - SearchCriteria sc = _dedicatedHostSearch.create(); - sc.setParameters("usage", PlannerResourceUsage.Dedicated); - return listBy(sc); - } +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.deploy.dao; + +import java.util.List; + +import javax.annotation.PostConstruct; +import javax.ejb.Local; + +import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage; +import com.cloud.deploy.PlannerHostReservationVO; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@Local(value = {PlannerHostReservationDao.class}) +public class PlannerHostReservationDaoImpl extends GenericDaoBase implements PlannerHostReservationDao { + + private SearchBuilder _hostIdSearch; + private SearchBuilder _reservedHostSearch; + private SearchBuilder _dedicatedHostSearch;; + + public PlannerHostReservationDaoImpl() { + + } + + @PostConstruct + protected void init() { + _hostIdSearch = createSearchBuilder(); + _hostIdSearch.and("hostId", _hostIdSearch.entity().getHostId(), SearchCriteria.Op.EQ); + _hostIdSearch.done(); + + _reservedHostSearch = createSearchBuilder(); + _reservedHostSearch.and("usage", _reservedHostSearch.entity().getResourceUsage(), SearchCriteria.Op.NNULL); + _reservedHostSearch.done(); + + _dedicatedHostSearch = createSearchBuilder(); + _dedicatedHostSearch.and("usage", _dedicatedHostSearch.entity().getResourceUsage(), SearchCriteria.Op.EQ); + _dedicatedHostSearch.done(); + } + + @Override + public PlannerHostReservationVO findByHostId(long hostId) { + SearchCriteria sc = _hostIdSearch.create(); + sc.setParameters("hostId", hostId); + return findOneBy(sc); + } + + @Override + public List listAllReservedHosts() { + SearchCriteria sc = _reservedHostSearch.create(); + return listBy(sc); + } + + @Override + public List listAllDedicatedHosts() { + SearchCriteria sc = _dedicatedHostSearch.create(); + sc.setParameters("usage", PlannerResourceUsage.Dedicated); + return listBy(sc); + } } \ No newline at end of file diff --git a/server/test/com/cloud/vm/DeploymentPlanningManagerImplTest.java b/server/test/com/cloud/vm/DeploymentPlanningManagerImplTest.java index 2e862d6f568..1616bccb76f 100644 --- a/server/test/com/cloud/vm/DeploymentPlanningManagerImplTest.java +++ b/server/test/com/cloud/vm/DeploymentPlanningManagerImplTest.java @@ -1,383 +1,383 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.vm; - -import static org.junit.Assert.*; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import com.cloud.service.ServiceOfferingVO; -import com.cloud.storage.StorageManager; -import com.cloud.storage.dao.DiskOfferingDao; -import com.cloud.storage.dao.GuestOSCategoryDao; -import com.cloud.storage.dao.GuestOSDao; -import com.cloud.storage.dao.StoragePoolHostDao; -import com.cloud.storage.dao.VolumeDao; -import com.cloud.capacity.CapacityManager; -import com.cloud.capacity.dao.CapacityDao; -import com.cloud.deploy.DeploymentPlanner.ExcludeList; -import com.cloud.agent.AgentManager; -import com.cloud.dc.ClusterDetailsDao; -import com.cloud.dc.ClusterVO; -import com.cloud.dc.DataCenterVO; -import com.cloud.dc.dao.ClusterDao; -import com.cloud.dc.dao.DataCenterDao; -import com.cloud.dc.dao.DedicatedResourceDao; -import com.cloud.dc.dao.HostPodDao; -import com.cloud.deploy.DataCenterDeployment; -import com.cloud.deploy.DeployDestination; -import com.cloud.deploy.DeploymentClusterPlanner; -import com.cloud.deploy.DeploymentPlanner; -import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage; -import com.cloud.deploy.DeploymentPlanningManagerImpl; -import com.cloud.deploy.FirstFitPlanner; -import com.cloud.deploy.PlannerHostReservationVO; -import com.cloud.deploy.dao.PlannerHostReservationDao; - -import org.apache.cloudstack.affinity.AffinityGroupProcessor; -import org.apache.cloudstack.affinity.AffinityGroupService; -import org.apache.cloudstack.affinity.dao.AffinityGroupDao; -import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; -import org.apache.cloudstack.engine.cloud.entity.api.db.dao.VMReservationDao; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.framework.messagebus.MessageBus; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.test.utils.SpringUtils; - -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mockito; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.ComponentScan; -import org.springframework.context.annotation.Configuration; -import org.springframework.context.annotation.FilterType; -import org.springframework.context.annotation.ComponentScan.Filter; -import org.springframework.core.type.classreading.MetadataReader; -import org.springframework.core.type.classreading.MetadataReaderFactory; -import org.springframework.core.type.filter.TypeFilter; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; -import org.springframework.test.context.support.AnnotationConfigContextLoader; - -import com.cloud.exception.AffinityConflictException; -import com.cloud.exception.InsufficientServerCapacityException; -import com.cloud.host.dao.HostDao; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.user.AccountManager; -import com.cloud.utils.component.ComponentContext; -import com.cloud.vm.dao.UserVmDao; -import com.cloud.vm.dao.VMInstanceDao; - -@RunWith(SpringJUnit4ClassRunner.class) -@ContextConfiguration(loader = AnnotationConfigContextLoader.class) -public class DeploymentPlanningManagerImplTest { - - @Inject - 
DeploymentPlanningManagerImpl _dpm; - - @Inject - PlannerHostReservationDao _plannerHostReserveDao; - - @Inject - VirtualMachineProfileImpl vmProfile; - - @Inject - AffinityGroupVMMapDao _affinityGroupVMMapDao; - - @Inject - ExcludeList avoids; - - @Inject - DataCenterVO dc; - - @Inject - DataCenterDao _dcDao; - - @Inject - FirstFitPlanner _planner; - - @Inject - ClusterDao _clusterDao; - - @Inject - DedicatedResourceDao _dedicatedDao; - - private static long domainId = 5L; - - private static long dataCenterId = 1L; - - @BeforeClass - public static void setUp() throws ConfigurationException { - } - - @Before - public void testSetUp() { - ComponentContext.initComponentsLifeCycle(); - - PlannerHostReservationVO reservationVO = new PlannerHostReservationVO(200L, 1L, 2L, 3L, PlannerResourceUsage.Shared); - Mockito.when(_plannerHostReserveDao.persist(Mockito.any(PlannerHostReservationVO.class))).thenReturn(reservationVO); - Mockito.when(_plannerHostReserveDao.findById(Mockito.anyLong())).thenReturn(reservationVO); - Mockito.when(_affinityGroupVMMapDao.countAffinityGroupsForVm(Mockito.anyLong())).thenReturn(0L); - - VMInstanceVO vm = new VMInstanceVO(); - Mockito.when(vmProfile.getVirtualMachine()).thenReturn(vm); - - Mockito.when(_dcDao.findById(Mockito.anyLong())).thenReturn(dc); - Mockito.when(dc.getId()).thenReturn(dataCenterId); - - ClusterVO clusterVO = new ClusterVO(); - clusterVO.setHypervisorType(HypervisorType.XenServer.toString()); - Mockito.when(_clusterDao.findById(Mockito.anyLong())).thenReturn(clusterVO); - - Mockito.when(_planner.getName()).thenReturn("FirstFitPlanner"); - List planners = new ArrayList(); - planners.add(_planner); - _dpm.setPlanners(planners); - - } - - @Test - public void dataCenterAvoidTest() throws InsufficientServerCapacityException, AffinityConflictException { - ServiceOfferingVO svcOffering = new ServiceOfferingVO("testOffering", 1, 512, 500, 1, 1, false, false, false, "test dpm", false, false, null, false, - VirtualMachine.Type.User, domainId, null, "FirstFitPlanner"); - Mockito.when(vmProfile.getServiceOffering()).thenReturn(svcOffering); - - DataCenterDeployment plan = new DataCenterDeployment(dataCenterId); - - Mockito.when(avoids.shouldAvoid((DataCenterVO)Mockito.anyObject())).thenReturn(true); - DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids); - assertNull("DataCenter is in avoid set, destination should be null! ", dest); - } - - @Test - public void plannerCannotHandleTest() throws InsufficientServerCapacityException, AffinityConflictException { - ServiceOfferingVO svcOffering = new ServiceOfferingVO("testOffering", 1, 512, 500, 1, 1, false, false, false, "test dpm", false, false, null, false, - VirtualMachine.Type.User, domainId, null, "UserDispersingPlanner"); - Mockito.when(vmProfile.getServiceOffering()).thenReturn(svcOffering); - - DataCenterDeployment plan = new DataCenterDeployment(dataCenterId); - Mockito.when(avoids.shouldAvoid((DataCenterVO)Mockito.anyObject())).thenReturn(false); - - Mockito.when(_planner.canHandle(vmProfile, plan, avoids)).thenReturn(false); - DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids); - assertNull("Planner cannot handle, destination should be null! 
", dest); - } - - @Test - public void emptyClusterListTest() throws InsufficientServerCapacityException, AffinityConflictException { - ServiceOfferingVO svcOffering = new ServiceOfferingVO("testOffering", 1, 512, 500, 1, 1, false, false, false, "test dpm", false, false, null, false, - VirtualMachine.Type.User, domainId, null, "FirstFitPlanner"); - Mockito.when(vmProfile.getServiceOffering()).thenReturn(svcOffering); - - DataCenterDeployment plan = new DataCenterDeployment(dataCenterId); - Mockito.when(avoids.shouldAvoid((DataCenterVO)Mockito.anyObject())).thenReturn(false); - Mockito.when(_planner.canHandle(vmProfile, plan, avoids)).thenReturn(true); - - Mockito.when(((DeploymentClusterPlanner)_planner).orderClusters(vmProfile, plan, avoids)).thenReturn(null); - DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids); - assertNull("Planner cannot handle, destination should be null! ", dest); - } - - @Configuration - @ComponentScan(basePackageClasses = {DeploymentPlanningManagerImpl.class}, - includeFilters = {@Filter(value = TestConfiguration.Library.class, type = FilterType.CUSTOM)}, - useDefaultFilters = false) - public static class TestConfiguration extends SpringUtils.CloudStackTestConfiguration { - - @Bean - public FirstFitPlanner firstFitPlanner() { - return Mockito.mock(FirstFitPlanner.class); - } - - @Bean - public DeploymentPlanner deploymentPlanner() { - return Mockito.mock(DeploymentPlanner.class); - } - - @Bean - public DataCenterVO dataCenter() { - return Mockito.mock(DataCenterVO.class); - } - - @Bean - public ExcludeList excludeList() { - return Mockito.mock(ExcludeList.class); - } - - @Bean - public VirtualMachineProfileImpl virtualMachineProfileImpl() { - return Mockito.mock(VirtualMachineProfileImpl.class); - } - - @Bean - public ClusterDetailsDao clusterDetailsDao() { - return Mockito.mock(ClusterDetailsDao.class); - } - - @Bean - public DataStoreManager cataStoreManager() { - return Mockito.mock(DataStoreManager.class); - } - - @Bean - public StorageManager storageManager() { - return Mockito.mock(StorageManager.class); - } - - @Bean - public HostDao hostDao() { - return Mockito.mock(HostDao.class); - } - - @Bean - public HostPodDao hostPodDao() { - return Mockito.mock(HostPodDao.class); - } - - @Bean - public ClusterDao clusterDao() { - return Mockito.mock(ClusterDao.class); - } - - @Bean - public DedicatedResourceDao dedicatedResourceDao() { - return Mockito.mock(DedicatedResourceDao.class); - } - - @Bean - public GuestOSDao guestOSDao() { - return Mockito.mock(GuestOSDao.class); - } - - @Bean - public GuestOSCategoryDao guestOSCategoryDao() { - return Mockito.mock(GuestOSCategoryDao.class); - } - - @Bean - public CapacityManager capacityManager() { - return Mockito.mock(CapacityManager.class); - } - - @Bean - public StoragePoolHostDao storagePoolHostDao() { - return Mockito.mock(StoragePoolHostDao.class); - } - - @Bean - public VolumeDao volumeDao() { - return Mockito.mock(VolumeDao.class); - } - - @Bean - public ConfigurationDao configurationDao() { - return Mockito.mock(ConfigurationDao.class); - } - - @Bean - public DiskOfferingDao diskOfferingDao() { - return Mockito.mock(DiskOfferingDao.class); - } - - @Bean - public PrimaryDataStoreDao primaryDataStoreDao() { - return Mockito.mock(PrimaryDataStoreDao.class); - } - - @Bean - public CapacityDao capacityDao() { - return Mockito.mock(CapacityDao.class); - } - - @Bean - public PlannerHostReservationDao plannerHostReservationDao() { - return Mockito.mock(PlannerHostReservationDao.class); - } - - @Bean - 
public AffinityGroupProcessor affinityGroupProcessor() { - return Mockito.mock(AffinityGroupProcessor.class); - } - - @Bean - public AffinityGroupDao affinityGroupDao() { - return Mockito.mock(AffinityGroupDao.class); - } - - @Bean - public AffinityGroupVMMapDao affinityGroupVMMapDao() { - return Mockito.mock(AffinityGroupVMMapDao.class); - } - - @Bean - public AccountManager accountManager() { - return Mockito.mock(AccountManager.class); - } - - @Bean - public AgentManager agentManager() { - return Mockito.mock(AgentManager.class); - } - - @Bean - public MessageBus messageBus() { - return Mockito.mock(MessageBus.class); - } - - @Bean - public UserVmDao userVMDao() { - return Mockito.mock(UserVmDao.class); - } - - @Bean - public VMInstanceDao vmInstanceDao() { - return Mockito.mock(VMInstanceDao.class); - } - - @Bean - public DataCenterDao dataCenterDao() { - return Mockito.mock(DataCenterDao.class); - } - - @Bean - public VMReservationDao reservationDao() { - return Mockito.mock(VMReservationDao.class); - } - - @Bean - public AffinityGroupService affinityGroupService() { - return Mockito.mock(AffinityGroupService.class); - } - - public static class Library implements TypeFilter { - - @Override - public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException { - ComponentScan cs = TestConfiguration.class.getAnnotation(ComponentScan.class); - return SpringUtils.includedInBasePackageClasses(mdr.getClassMetadata().getClassName(), cs); - } - } - } -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.vm; + +import static org.junit.Assert.*; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import com.cloud.service.ServiceOfferingVO; +import com.cloud.storage.StorageManager; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.GuestOSCategoryDao; +import com.cloud.storage.dao.GuestOSDao; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.capacity.CapacityManager; +import com.cloud.capacity.dao.CapacityDao; +import com.cloud.deploy.DeploymentPlanner.ExcludeList; +import com.cloud.agent.AgentManager; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.DedicatedResourceDao; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.deploy.DataCenterDeployment; +import com.cloud.deploy.DeployDestination; +import com.cloud.deploy.DeploymentClusterPlanner; +import com.cloud.deploy.DeploymentPlanner; +import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage; +import com.cloud.deploy.DeploymentPlanningManagerImpl; +import com.cloud.deploy.FirstFitPlanner; +import com.cloud.deploy.PlannerHostReservationVO; +import com.cloud.deploy.dao.PlannerHostReservationDao; + +import org.apache.cloudstack.affinity.AffinityGroupProcessor; +import org.apache.cloudstack.affinity.AffinityGroupService; +import org.apache.cloudstack.affinity.dao.AffinityGroupDao; +import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; +import org.apache.cloudstack.engine.cloud.entity.api.db.dao.VMReservationDao; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.messagebus.MessageBus; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.test.utils.SpringUtils; + +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.FilterType; +import org.springframework.context.annotation.ComponentScan.Filter; +import org.springframework.core.type.classreading.MetadataReader; +import org.springframework.core.type.classreading.MetadataReaderFactory; +import org.springframework.core.type.filter.TypeFilter; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; +import org.springframework.test.context.support.AnnotationConfigContextLoader; + +import com.cloud.exception.AffinityConflictException; +import com.cloud.exception.InsufficientServerCapacityException; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.user.AccountManager; +import com.cloud.utils.component.ComponentContext; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDao; + +@RunWith(SpringJUnit4ClassRunner.class) +@ContextConfiguration(loader = AnnotationConfigContextLoader.class) +public class DeploymentPlanningManagerImplTest { + + @Inject + 
DeploymentPlanningManagerImpl _dpm; + + @Inject + PlannerHostReservationDao _plannerHostReserveDao; + + @Inject + VirtualMachineProfileImpl vmProfile; + + @Inject + AffinityGroupVMMapDao _affinityGroupVMMapDao; + + @Inject + ExcludeList avoids; + + @Inject + DataCenterVO dc; + + @Inject + DataCenterDao _dcDao; + + @Inject + FirstFitPlanner _planner; + + @Inject + ClusterDao _clusterDao; + + @Inject + DedicatedResourceDao _dedicatedDao; + + private static long domainId = 5L; + + private static long dataCenterId = 1L; + + @BeforeClass + public static void setUp() throws ConfigurationException { + } + + @Before + public void testSetUp() { + ComponentContext.initComponentsLifeCycle(); + + PlannerHostReservationVO reservationVO = new PlannerHostReservationVO(200L, 1L, 2L, 3L, PlannerResourceUsage.Shared); + Mockito.when(_plannerHostReserveDao.persist(Mockito.any(PlannerHostReservationVO.class))).thenReturn(reservationVO); + Mockito.when(_plannerHostReserveDao.findById(Mockito.anyLong())).thenReturn(reservationVO); + Mockito.when(_affinityGroupVMMapDao.countAffinityGroupsForVm(Mockito.anyLong())).thenReturn(0L); + + VMInstanceVO vm = new VMInstanceVO(); + Mockito.when(vmProfile.getVirtualMachine()).thenReturn(vm); + + Mockito.when(_dcDao.findById(Mockito.anyLong())).thenReturn(dc); + Mockito.when(dc.getId()).thenReturn(dataCenterId); + + ClusterVO clusterVO = new ClusterVO(); + clusterVO.setHypervisorType(HypervisorType.XenServer.toString()); + Mockito.when(_clusterDao.findById(Mockito.anyLong())).thenReturn(clusterVO); + + Mockito.when(_planner.getName()).thenReturn("FirstFitPlanner"); + List planners = new ArrayList(); + planners.add(_planner); + _dpm.setPlanners(planners); + + } + + @Test + public void dataCenterAvoidTest() throws InsufficientServerCapacityException, AffinityConflictException { + ServiceOfferingVO svcOffering = new ServiceOfferingVO("testOffering", 1, 512, 500, 1, 1, false, false, false, "test dpm", false, false, null, false, + VirtualMachine.Type.User, domainId, null, "FirstFitPlanner"); + Mockito.when(vmProfile.getServiceOffering()).thenReturn(svcOffering); + + DataCenterDeployment plan = new DataCenterDeployment(dataCenterId); + + Mockito.when(avoids.shouldAvoid((DataCenterVO)Mockito.anyObject())).thenReturn(true); + DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids); + assertNull("DataCenter is in avoid set, destination should be null! ", dest); + } + + @Test + public void plannerCannotHandleTest() throws InsufficientServerCapacityException, AffinityConflictException { + ServiceOfferingVO svcOffering = new ServiceOfferingVO("testOffering", 1, 512, 500, 1, 1, false, false, false, "test dpm", false, false, null, false, + VirtualMachine.Type.User, domainId, null, "UserDispersingPlanner"); + Mockito.when(vmProfile.getServiceOffering()).thenReturn(svcOffering); + + DataCenterDeployment plan = new DataCenterDeployment(dataCenterId); + Mockito.when(avoids.shouldAvoid((DataCenterVO)Mockito.anyObject())).thenReturn(false); + + Mockito.when(_planner.canHandle(vmProfile, plan, avoids)).thenReturn(false); + DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids); + assertNull("Planner cannot handle, destination should be null! 
", dest); + } + + @Test + public void emptyClusterListTest() throws InsufficientServerCapacityException, AffinityConflictException { + ServiceOfferingVO svcOffering = new ServiceOfferingVO("testOffering", 1, 512, 500, 1, 1, false, false, false, "test dpm", false, false, null, false, + VirtualMachine.Type.User, domainId, null, "FirstFitPlanner"); + Mockito.when(vmProfile.getServiceOffering()).thenReturn(svcOffering); + + DataCenterDeployment plan = new DataCenterDeployment(dataCenterId); + Mockito.when(avoids.shouldAvoid((DataCenterVO)Mockito.anyObject())).thenReturn(false); + Mockito.when(_planner.canHandle(vmProfile, plan, avoids)).thenReturn(true); + + Mockito.when(((DeploymentClusterPlanner)_planner).orderClusters(vmProfile, plan, avoids)).thenReturn(null); + DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids); + assertNull("Planner cannot handle, destination should be null! ", dest); + } + + @Configuration + @ComponentScan(basePackageClasses = {DeploymentPlanningManagerImpl.class}, + includeFilters = {@Filter(value = TestConfiguration.Library.class, type = FilterType.CUSTOM)}, + useDefaultFilters = false) + public static class TestConfiguration extends SpringUtils.CloudStackTestConfiguration { + + @Bean + public FirstFitPlanner firstFitPlanner() { + return Mockito.mock(FirstFitPlanner.class); + } + + @Bean + public DeploymentPlanner deploymentPlanner() { + return Mockito.mock(DeploymentPlanner.class); + } + + @Bean + public DataCenterVO dataCenter() { + return Mockito.mock(DataCenterVO.class); + } + + @Bean + public ExcludeList excludeList() { + return Mockito.mock(ExcludeList.class); + } + + @Bean + public VirtualMachineProfileImpl virtualMachineProfileImpl() { + return Mockito.mock(VirtualMachineProfileImpl.class); + } + + @Bean + public ClusterDetailsDao clusterDetailsDao() { + return Mockito.mock(ClusterDetailsDao.class); + } + + @Bean + public DataStoreManager cataStoreManager() { + return Mockito.mock(DataStoreManager.class); + } + + @Bean + public StorageManager storageManager() { + return Mockito.mock(StorageManager.class); + } + + @Bean + public HostDao hostDao() { + return Mockito.mock(HostDao.class); + } + + @Bean + public HostPodDao hostPodDao() { + return Mockito.mock(HostPodDao.class); + } + + @Bean + public ClusterDao clusterDao() { + return Mockito.mock(ClusterDao.class); + } + + @Bean + public DedicatedResourceDao dedicatedResourceDao() { + return Mockito.mock(DedicatedResourceDao.class); + } + + @Bean + public GuestOSDao guestOSDao() { + return Mockito.mock(GuestOSDao.class); + } + + @Bean + public GuestOSCategoryDao guestOSCategoryDao() { + return Mockito.mock(GuestOSCategoryDao.class); + } + + @Bean + public CapacityManager capacityManager() { + return Mockito.mock(CapacityManager.class); + } + + @Bean + public StoragePoolHostDao storagePoolHostDao() { + return Mockito.mock(StoragePoolHostDao.class); + } + + @Bean + public VolumeDao volumeDao() { + return Mockito.mock(VolumeDao.class); + } + + @Bean + public ConfigurationDao configurationDao() { + return Mockito.mock(ConfigurationDao.class); + } + + @Bean + public DiskOfferingDao diskOfferingDao() { + return Mockito.mock(DiskOfferingDao.class); + } + + @Bean + public PrimaryDataStoreDao primaryDataStoreDao() { + return Mockito.mock(PrimaryDataStoreDao.class); + } + + @Bean + public CapacityDao capacityDao() { + return Mockito.mock(CapacityDao.class); + } + + @Bean + public PlannerHostReservationDao plannerHostReservationDao() { + return Mockito.mock(PlannerHostReservationDao.class); + } + + @Bean + 
public AffinityGroupProcessor affinityGroupProcessor() { + return Mockito.mock(AffinityGroupProcessor.class); + } + + @Bean + public AffinityGroupDao affinityGroupDao() { + return Mockito.mock(AffinityGroupDao.class); + } + + @Bean + public AffinityGroupVMMapDao affinityGroupVMMapDao() { + return Mockito.mock(AffinityGroupVMMapDao.class); + } + + @Bean + public AccountManager accountManager() { + return Mockito.mock(AccountManager.class); + } + + @Bean + public AgentManager agentManager() { + return Mockito.mock(AgentManager.class); + } + + @Bean + public MessageBus messageBus() { + return Mockito.mock(MessageBus.class); + } + + @Bean + public UserVmDao userVMDao() { + return Mockito.mock(UserVmDao.class); + } + + @Bean + public VMInstanceDao vmInstanceDao() { + return Mockito.mock(VMInstanceDao.class); + } + + @Bean + public DataCenterDao dataCenterDao() { + return Mockito.mock(DataCenterDao.class); + } + + @Bean + public VMReservationDao reservationDao() { + return Mockito.mock(VMReservationDao.class); + } + + @Bean + public AffinityGroupService affinityGroupService() { + return Mockito.mock(AffinityGroupService.class); + } + + public static class Library implements TypeFilter { + + @Override + public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException { + ComponentScan cs = TestConfiguration.class.getAnnotation(ComponentScan.class); + return SpringUtils.includedInBasePackageClasses(mdr.getClassMetadata().getClassName(), cs); + } + } + } +} diff --git a/server/test/org/apache/cloudstack/affinity/AffinityApiUnitTest.java b/server/test/org/apache/cloudstack/affinity/AffinityApiUnitTest.java index 909c3230785..3eb4029e7f7 100644 --- a/server/test/org/apache/cloudstack/affinity/AffinityApiUnitTest.java +++ b/server/test/org/apache/cloudstack/affinity/AffinityApiUnitTest.java @@ -1,293 +1,293 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package org.apache.cloudstack.affinity; - -import static org.junit.Assert.assertNotNull; -import static org.mockito.Matchers.anyBoolean; -import static org.mockito.Matchers.anyLong; -import static org.mockito.Matchers.anyObject; -import static org.mockito.Matchers.anyString; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.when; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; - -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.junit.After; -import org.apache.cloudstack.acl.ControlledEntity; -import org.apache.cloudstack.affinity.dao.AffinityGroupDao; -import org.apache.cloudstack.affinity.dao.AffinityGroupDomainMapDao; -import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; -import org.apache.cloudstack.test.utils.SpringUtils; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mockito; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.ComponentScan; -import org.springframework.context.annotation.ComponentScan.Filter; -import org.springframework.context.annotation.Configuration; -import org.springframework.context.annotation.FilterType; -import org.springframework.core.type.classreading.MetadataReader; -import org.springframework.core.type.classreading.MetadataReaderFactory; -import org.springframework.core.type.filter.TypeFilter; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; -import org.springframework.test.context.support.AnnotationConfigContextLoader; - -import org.apache.cloudstack.affinity.dao.AffinityGroupDao; -import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; -import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.test.utils.SpringUtils; - -import com.cloud.dc.dao.DedicatedResourceDao; -import com.cloud.event.ActionEventUtils; -import com.cloud.dc.dao.DedicatedResourceDao; -import com.cloud.deploy.DeploymentPlanner; -import com.cloud.domain.dao.DomainDao; -import com.cloud.event.ActionEventUtils; -import com.cloud.event.EventVO; -import com.cloud.event.dao.EventDao; -import com.cloud.exception.InvalidParameterValueException; -import com.cloud.exception.ResourceInUseException; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.user.Account; -import com.cloud.user.AccountManager; -import com.cloud.user.AccountService; -import com.cloud.user.AccountVO; -import com.cloud.user.UserVO; -import com.cloud.user.DomainManager; -import com.cloud.user.dao.AccountDao; -import com.cloud.user.dao.UserDao; -import com.cloud.utils.component.ComponentContext; -import com.cloud.vm.UserVmVO; -import com.cloud.vm.VirtualMachine; -import com.cloud.vm.dao.UserVmDao; - -@RunWith(SpringJUnit4ClassRunner.class) -@ContextConfiguration(loader = AnnotationConfigContextLoader.class) -public class AffinityApiUnitTest { - - @Inject - AffinityGroupServiceImpl _affinityService; - - @Inject - AccountManager _acctMgr; - - @Inject - AffinityGroupProcessor _processor; - - @Inject - AffinityGroupDao _groupDao; - - @Inject - UserVmDao _vmDao; - - @Inject - AffinityGroupVMMapDao _affinityGroupVMMapDao; - - @Inject - AffinityGroupDao _affinityGroupDao; - - @Inject - ActionEventUtils _eventUtils; - - @Inject - AccountDao _accountDao; - - @Inject - EventDao _eventDao; - - @Inject - DedicatedResourceDao 
_dedicatedDao; - - private static long domainId = 5L; - - @BeforeClass - public static void setUpClass() throws ConfigurationException { - } - - @Before - public void setUp() { - ComponentContext.initComponentsLifeCycle(); - AccountVO acct = new AccountVO(200L); - acct.setType(Account.ACCOUNT_TYPE_NORMAL); - acct.setAccountName("user"); - acct.setDomainId(domainId); - - UserVO user = new UserVO(1, "testuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString()); - - CallContext.register(user, acct); - - when(_acctMgr.finalizeOwner((Account)anyObject(), anyString(), anyLong(), anyLong())).thenReturn(acct); - when(_processor.getType()).thenReturn("mock"); - when(_accountDao.findByIdIncludingRemoved(0L)).thenReturn(acct); - - List affinityProcessors = new ArrayList(); - affinityProcessors.add(_processor); - _affinityService.setAffinityGroupProcessors(affinityProcessors); - - AffinityGroupVO group = new AffinityGroupVO("group1", "mock", "mock group", domainId, 200L, ControlledEntity.ACLType.Account); - Mockito.when(_affinityGroupDao.persist(Mockito.any(AffinityGroupVO.class))).thenReturn(group); - Mockito.when(_affinityGroupDao.findById(Mockito.anyLong())).thenReturn(group); - Mockito.when(_affinityGroupDao.findByAccountAndName(Mockito.anyLong(), Mockito.anyString())).thenReturn(group); - Mockito.when(_affinityGroupDao.lockRow(Mockito.anyLong(), anyBoolean())).thenReturn(group); - Mockito.when(_affinityGroupDao.expunge(Mockito.anyLong())).thenReturn(true); - Mockito.when(_eventDao.persist(Mockito.any(EventVO.class))).thenReturn(new EventVO()); - } - - @After - public void tearDown() { - CallContext.unregister(); - } - - @Test - public void createAffinityGroupTest() { - when(_groupDao.isNameInUse(anyLong(), anyLong(), eq("group1"))).thenReturn(false); - AffinityGroup group = _affinityService.createAffinityGroup("user", domainId, "group1", "mock", "affinity group one"); - assertNotNull("Affinity group 'group1' of type 'mock' failed to create ", group); - - } - - @Test(expected = InvalidParameterValueException.class) - public void invalidAffinityTypeTest() { - AffinityGroup group = _affinityService.createAffinityGroup("user", domainId, "group1", "invalid", "affinity group one"); - - } - - @Test(expected = InvalidParameterValueException.class) - public void uniqueAffinityNameTest() { - when(_groupDao.isNameInUse(anyLong(), anyLong(), eq("group1"))).thenReturn(true); - AffinityGroup group2 = _affinityService.createAffinityGroup("user", domainId, "group1", "mock", "affinity group two"); - } - - @Test(expected = InvalidParameterValueException.class) - public void deleteAffinityGroupInvalidIdTest() throws ResourceInUseException { - when(_groupDao.findById(20L)).thenReturn(null); - _affinityService.deleteAffinityGroup(20L, "user", domainId, "group1"); - } - - @Test(expected = InvalidParameterValueException.class) - public void deleteAffinityGroupInvalidIdName() throws ResourceInUseException { - when(_groupDao.findByAccountAndName(200L, "group1")).thenReturn(null); - _affinityService.deleteAffinityGroup(null, "user", domainId, "group1"); - } - - @Test(expected = InvalidParameterValueException.class) - public void deleteAffinityGroupNullIdName() throws ResourceInUseException { - _affinityService.deleteAffinityGroup(null, "user", domainId, null); - } - - @Test(expected = InvalidParameterValueException.class) - public void updateAffinityGroupVMRunning() throws ResourceInUseException { - - UserVmVO vm = new UserVmVO(10L, "test", "test", 101L, HypervisorType.Any, 21L, 
false, false, domainId, 200L, 5L, "", "test", 1L); - vm.setState(VirtualMachine.State.Running); - when(_vmDao.findById(10L)).thenReturn(vm); - - List affinityGroupIds = new ArrayList(); - affinityGroupIds.add(20L); - - _affinityService.updateVMAffinityGroups(10L, affinityGroupIds); - } - - @Configuration - @ComponentScan(basePackageClasses = {AffinityGroupServiceImpl.class, ActionEventUtils.class}, includeFilters = {@Filter(value = TestConfiguration.Library.class, - type = FilterType.CUSTOM)}, useDefaultFilters = false) - public static class TestConfiguration extends SpringUtils.CloudStackTestConfiguration { - - @Bean - public AccountDao accountDao() { - return Mockito.mock(AccountDao.class); - } - - @Bean - public AccountService accountService() { - return Mockito.mock(AccountService.class); - } - - @Bean - public AffinityGroupProcessor affinityGroupProcessor() { - return Mockito.mock(AffinityGroupProcessor.class); - } - - @Bean - public AffinityGroupDao affinityGroupDao() { - return Mockito.mock(AffinityGroupDao.class); - } - - @Bean - public AffinityGroupVMMapDao affinityGroupVMMapDao() { - return Mockito.mock(AffinityGroupVMMapDao.class); - } - - @Bean - public DedicatedResourceDao dedicatedResourceDao() { - return Mockito.mock(DedicatedResourceDao.class); - } - - @Bean - public AccountManager accountManager() { - return Mockito.mock(AccountManager.class); - } - - @Bean - public DomainManager domainManager() { - return Mockito.mock(DomainManager.class); - } - - @Bean - public EventDao eventDao() { - return Mockito.mock(EventDao.class); - } - - @Bean - public UserVmDao userVMDao() { - return Mockito.mock(UserVmDao.class); - } - - @Bean - public UserDao userDao() { - return Mockito.mock(UserDao.class); - } - - @Bean - public AffinityGroupDomainMapDao affinityGroupDomainMapDao() { - return Mockito.mock(AffinityGroupDomainMapDao.class); - } - - @Bean - public DomainDao domainDao() { - return Mockito.mock(DomainDao.class); - } - - public static class Library implements TypeFilter { - - @Override - public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException { - ComponentScan cs = TestConfiguration.class.getAnnotation(ComponentScan.class); - return SpringUtils.includedInBasePackageClasses(mdr.getClassMetadata().getClassName(), cs); - } - } - } -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.affinity; + +import static org.junit.Assert.assertNotNull; +import static org.mockito.Matchers.anyBoolean; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.junit.After; +import org.apache.cloudstack.acl.ControlledEntity; +import org.apache.cloudstack.affinity.dao.AffinityGroupDao; +import org.apache.cloudstack.affinity.dao.AffinityGroupDomainMapDao; +import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; +import org.apache.cloudstack.test.utils.SpringUtils; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.ComponentScan.Filter; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.FilterType; +import org.springframework.core.type.classreading.MetadataReader; +import org.springframework.core.type.classreading.MetadataReaderFactory; +import org.springframework.core.type.filter.TypeFilter; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; +import org.springframework.test.context.support.AnnotationConfigContextLoader; + +import org.apache.cloudstack.affinity.dao.AffinityGroupDao; +import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.test.utils.SpringUtils; + +import com.cloud.dc.dao.DedicatedResourceDao; +import com.cloud.event.ActionEventUtils; +import com.cloud.dc.dao.DedicatedResourceDao; +import com.cloud.deploy.DeploymentPlanner; +import com.cloud.domain.dao.DomainDao; +import com.cloud.event.ActionEventUtils; +import com.cloud.event.EventVO; +import com.cloud.event.dao.EventDao; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.ResourceInUseException; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.AccountService; +import com.cloud.user.AccountVO; +import com.cloud.user.UserVO; +import com.cloud.user.DomainManager; +import com.cloud.user.dao.AccountDao; +import com.cloud.user.dao.UserDao; +import com.cloud.utils.component.ComponentContext; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.UserVmDao; + +@RunWith(SpringJUnit4ClassRunner.class) +@ContextConfiguration(loader = AnnotationConfigContextLoader.class) +public class AffinityApiUnitTest { + + @Inject + AffinityGroupServiceImpl _affinityService; + + @Inject + AccountManager _acctMgr; + + @Inject + AffinityGroupProcessor _processor; + + @Inject + AffinityGroupDao _groupDao; + + @Inject + UserVmDao _vmDao; + + @Inject + AffinityGroupVMMapDao _affinityGroupVMMapDao; + + @Inject + AffinityGroupDao _affinityGroupDao; + + @Inject + ActionEventUtils _eventUtils; + + @Inject + AccountDao _accountDao; + + @Inject + EventDao _eventDao; + + @Inject + DedicatedResourceDao 
_dedicatedDao; + + private static long domainId = 5L; + + @BeforeClass + public static void setUpClass() throws ConfigurationException { + } + + @Before + public void setUp() { + ComponentContext.initComponentsLifeCycle(); + AccountVO acct = new AccountVO(200L); + acct.setType(Account.ACCOUNT_TYPE_NORMAL); + acct.setAccountName("user"); + acct.setDomainId(domainId); + + UserVO user = new UserVO(1, "testuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString()); + + CallContext.register(user, acct); + + when(_acctMgr.finalizeOwner((Account)anyObject(), anyString(), anyLong(), anyLong())).thenReturn(acct); + when(_processor.getType()).thenReturn("mock"); + when(_accountDao.findByIdIncludingRemoved(0L)).thenReturn(acct); + + List affinityProcessors = new ArrayList(); + affinityProcessors.add(_processor); + _affinityService.setAffinityGroupProcessors(affinityProcessors); + + AffinityGroupVO group = new AffinityGroupVO("group1", "mock", "mock group", domainId, 200L, ControlledEntity.ACLType.Account); + Mockito.when(_affinityGroupDao.persist(Mockito.any(AffinityGroupVO.class))).thenReturn(group); + Mockito.when(_affinityGroupDao.findById(Mockito.anyLong())).thenReturn(group); + Mockito.when(_affinityGroupDao.findByAccountAndName(Mockito.anyLong(), Mockito.anyString())).thenReturn(group); + Mockito.when(_affinityGroupDao.lockRow(Mockito.anyLong(), anyBoolean())).thenReturn(group); + Mockito.when(_affinityGroupDao.expunge(Mockito.anyLong())).thenReturn(true); + Mockito.when(_eventDao.persist(Mockito.any(EventVO.class))).thenReturn(new EventVO()); + } + + @After + public void tearDown() { + CallContext.unregister(); + } + + @Test + public void createAffinityGroupTest() { + when(_groupDao.isNameInUse(anyLong(), anyLong(), eq("group1"))).thenReturn(false); + AffinityGroup group = _affinityService.createAffinityGroup("user", domainId, "group1", "mock", "affinity group one"); + assertNotNull("Affinity group 'group1' of type 'mock' failed to create ", group); + + } + + @Test(expected = InvalidParameterValueException.class) + public void invalidAffinityTypeTest() { + AffinityGroup group = _affinityService.createAffinityGroup("user", domainId, "group1", "invalid", "affinity group one"); + + } + + @Test(expected = InvalidParameterValueException.class) + public void uniqueAffinityNameTest() { + when(_groupDao.isNameInUse(anyLong(), anyLong(), eq("group1"))).thenReturn(true); + AffinityGroup group2 = _affinityService.createAffinityGroup("user", domainId, "group1", "mock", "affinity group two"); + } + + @Test(expected = InvalidParameterValueException.class) + public void deleteAffinityGroupInvalidIdTest() throws ResourceInUseException { + when(_groupDao.findById(20L)).thenReturn(null); + _affinityService.deleteAffinityGroup(20L, "user", domainId, "group1"); + } + + @Test(expected = InvalidParameterValueException.class) + public void deleteAffinityGroupInvalidIdName() throws ResourceInUseException { + when(_groupDao.findByAccountAndName(200L, "group1")).thenReturn(null); + _affinityService.deleteAffinityGroup(null, "user", domainId, "group1"); + } + + @Test(expected = InvalidParameterValueException.class) + public void deleteAffinityGroupNullIdName() throws ResourceInUseException { + _affinityService.deleteAffinityGroup(null, "user", domainId, null); + } + + @Test(expected = InvalidParameterValueException.class) + public void updateAffinityGroupVMRunning() throws ResourceInUseException { + + UserVmVO vm = new UserVmVO(10L, "test", "test", 101L, HypervisorType.Any, 21L, 
false, false, domainId, 200L, 5L, "", "test", 1L); + vm.setState(VirtualMachine.State.Running); + when(_vmDao.findById(10L)).thenReturn(vm); + + List affinityGroupIds = new ArrayList(); + affinityGroupIds.add(20L); + + _affinityService.updateVMAffinityGroups(10L, affinityGroupIds); + } + + @Configuration + @ComponentScan(basePackageClasses = {AffinityGroupServiceImpl.class, ActionEventUtils.class}, includeFilters = {@Filter(value = TestConfiguration.Library.class, + type = FilterType.CUSTOM)}, useDefaultFilters = false) + public static class TestConfiguration extends SpringUtils.CloudStackTestConfiguration { + + @Bean + public AccountDao accountDao() { + return Mockito.mock(AccountDao.class); + } + + @Bean + public AccountService accountService() { + return Mockito.mock(AccountService.class); + } + + @Bean + public AffinityGroupProcessor affinityGroupProcessor() { + return Mockito.mock(AffinityGroupProcessor.class); + } + + @Bean + public AffinityGroupDao affinityGroupDao() { + return Mockito.mock(AffinityGroupDao.class); + } + + @Bean + public AffinityGroupVMMapDao affinityGroupVMMapDao() { + return Mockito.mock(AffinityGroupVMMapDao.class); + } + + @Bean + public DedicatedResourceDao dedicatedResourceDao() { + return Mockito.mock(DedicatedResourceDao.class); + } + + @Bean + public AccountManager accountManager() { + return Mockito.mock(AccountManager.class); + } + + @Bean + public DomainManager domainManager() { + return Mockito.mock(DomainManager.class); + } + + @Bean + public EventDao eventDao() { + return Mockito.mock(EventDao.class); + } + + @Bean + public UserVmDao userVMDao() { + return Mockito.mock(UserVmDao.class); + } + + @Bean + public UserDao userDao() { + return Mockito.mock(UserDao.class); + } + + @Bean + public AffinityGroupDomainMapDao affinityGroupDomainMapDao() { + return Mockito.mock(AffinityGroupDomainMapDao.class); + } + + @Bean + public DomainDao domainDao() { + return Mockito.mock(DomainDao.class); + } + + public static class Library implements TypeFilter { + + @Override + public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException { + ComponentScan cs = TestConfiguration.class.getAnnotation(ComponentScan.class); + return SpringUtils.includedInBasePackageClasses(mdr.getClassMetadata().getClassName(), cs); + } + } + } +}
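
Although this patch only strips trailing whitespace, the hunks above run through two pieces of planner logic that are easier to follow in isolation. The first is the pool-reuse decision in DeploymentPlanningManagerImpl: a READY volume keeps its current storage pool only when that pool is not in maintenance, is not in the avoid set, and either sits in the planned cluster or is a zone-wide pool in the planned zone. The standalone sketch below restates that rule with simplified stand-in records (Plan, Pool); it is illustrative only and does not use the actual CloudStack classes or signatures.

// Simplified, self-contained sketch of the pool-reuse check touched by the
// DeploymentPlanningManagerImpl hunk above. Plan and Pool are illustrative
// stand-ins, not CloudStack types.
public final class PoolReuseSketch {

    // Minimal stand-ins for the deployment plan and the volume's current pool.
    record Plan(long zoneId, Long podId, Long clusterId) {}
    record Pool(long zoneId, Long podId, Long clusterId, boolean zoneWide, boolean inMaintenance) {}

    // A READY volume keeps its pool when the pool is usable and fits the plan:
    // same cluster, or a zone-wide pool in the same zone. Otherwise a new pool
    // has to be allocated for the volume.
    static boolean canReusePool(Pool pool, Plan plan, boolean poolInAvoidSet) {
        if (pool.inMaintenance() || poolInAvoidSet) {
            return false;
        }
        boolean sameCluster = plan.zoneId() == pool.zoneId()
                && java.util.Objects.equals(plan.podId(), pool.podId())
                && java.util.Objects.equals(plan.clusterId(), pool.clusterId());
        boolean zoneWideMatch = plan.zoneId() == pool.zoneId() && pool.zoneWide();
        return sameCluster || zoneWideMatch;
    }

    public static void main(String[] args) {
        Plan plan = new Plan(1L, 2L, 3L);
        System.out.println(canReusePool(new Pool(1L, 2L, 3L, false, false), plan, false)); // true: same cluster
        System.out.println(canReusePool(new Pool(1L, null, null, true, false), plan, false)); // true: zone-wide pool
        System.out.println(canReusePool(new Pool(1L, 2L, 9L, false, false), plan, false)); // false: other cluster
    }
}

In the surrounding control flow, a reusable pool is simply recorded as the volume's suitable pool; every other case falls through to the StoragePoolAllocators.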
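
The second is the cleanup rule encoded in the postStateTransitionEvent hunk: once a VM moves out of the Starting state, its planner reservation rows are expunged. A minimal sketch of that rule follows, again with hypothetical stand-in types (an in-memory map instead of the real reservation DAO):

// Illustrative sketch of the reservation-cleanup rule from postStateTransitionEvent.
// The map below stands in for the vm_reservation table; it is not a CloudStack API.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public final class ReservationCleanupSketch {

    enum State { Starting, Running, Stopped }

    // vmId -> reservation token, standing in for persisted VM reservations.
    private final Map<Long, String> reservations = new ConcurrentHashMap<>();

    void reserve(long vmId, String reservationId) {
        reservations.put(vmId, reservationId);
    }

    // Mirrors the check "(oldState == Starting) && (newState != Starting)":
    // leaving the Starting state removes all reservation entries for the VM.
    void onStateTransition(long vmId, State oldState, State newState) {
        if (oldState == State.Starting && newState != State.Starting) {
            reservations.remove(vmId);
        }
    }

    public static void main(String[] args) {
        ReservationCleanupSketch s = new ReservationCleanupSketch();
        s.reserve(10L, "res-1");
        s.onStateTransition(10L, State.Starting, State.Running);
        System.out.println(s.reservations.containsKey(10L)); // false: reservation cleaned up
    }
}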